problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---
gh_patches_debug_19716 | rasdani/github-patches | git_diff | rotki__rotki-5777 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make sure that Docker image can properly shutdown
## Problem Definition
While trying to set up the proxy I noticed that the proxy didn't exit cleanly.
Instead, it exited with (code=137).
This might be because there is an infinite loop (intentional) monitoring the subprocesses, and this loop only exits if one of the subprocesses stops running.
This will cause the script to exit with code 1.
https://github.com/rotki/rotki/blob/53d36bc1f5aa0ff64fcfaf21f0ba6ca005a032eb/packaging/docker/entrypoint.py#L173
## Requirements
- Handle `SIGINT`/`SIGTERM` normally and exit cleanly (code=0)
- Make sure that if the subprocesses die we exit (code=1)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packaging/docker/entrypoint.py`
Content:
```
1 #!/usr/bin/python3
2 import json
3 import logging
4 import os
5 import shutil
6 import subprocess
7 import time
8 from datetime import datetime, timedelta
9 from pathlib import Path
10 from typing import Any, Dict, List, Optional
11
12 logger = logging.getLogger('monitor')
13 logging.basicConfig(level=logging.DEBUG)
14
15 DEFAULT_LOG_LEVEL = 'critical'
16
17
18 def can_delete(file: Path, cutoff: int) -> bool:
19 return int(os.stat(file).st_mtime) <= cutoff or file.name.startswith('_MEI')
20
21
22 def cleanup_tmp() -> None:
23 logger.info('Preparing to cleanup tmp directory')
24 tmp_dir = Path('/tmp/').glob('*')
25 cache_cutoff = datetime.today() - timedelta(hours=6)
26 cutoff_epoch = int(cache_cutoff.strftime("%s"))
27 to_delete = filter(lambda x: can_delete(x, cutoff_epoch), tmp_dir)
28
29 deleted = 0
30 skipped = 0
31
32 for item in to_delete:
33 path = Path(item)
34 if path.is_file():
35 try:
36 path.unlink()
37 deleted += 1
38 continue
39 except PermissionError:
40 skipped += 1
41 continue
42
43 try:
44 shutil.rmtree(item)
45 deleted += 1
46 except OSError:
47 skipped += 1
48 continue
49
50 logger.info(f'Deleted {deleted} files or directories, skipped {skipped} from /tmp')
51
52
53 def load_config_from_file() -> Optional[Dict[str, Any]]:
54 config_file = Path('/config/rotki_config.json')
55 if not config_file.exists():
56 logger.info('no config file provided')
57 return None
58
59 with open(config_file) as file:
60 try:
61 data = json.load(file)
62 return data
63 except json.JSONDecodeError as e:
64 logger.error(e)
65 return None
66
67
68 def load_config_from_env() -> Dict[str, Any]:
69 loglevel = os.environ.get('LOGLEVEL')
70 logfromothermodules = os.environ.get('LOGFROMOTHERMODDULES')
71 max_size_in_mb_all_logs = os.environ.get('MAX_SIZE_IN_MB_ALL_LOGS')
72 max_logfiles_num = os.environ.get('MAX_LOGFILES_NUM')
73 sqlite_instructions = os.environ.get('SQLITE_INSTRUCTIONS')
74
75 return {
76 'loglevel': loglevel,
77 'logfromothermodules': logfromothermodules,
78 'max_logfiles_num': max_logfiles_num,
79 'max_size_in_mb_all_logs': max_size_in_mb_all_logs,
80 'sqlite_instructions': sqlite_instructions,
81 }
82
83
84 def load_config() -> List[str]:
85 env_config = load_config_from_env()
86 file_config = load_config_from_file()
87
88 logger.info('loading config from env')
89
90 loglevel = env_config.get('loglevel')
91 log_from_other_modules = env_config.get('logfromothermodules')
92 max_logfiles_num = env_config.get('max_logfiles_num')
93 max_size_in_mb_all_logs = env_config.get('max_size_in_mb_all_logs')
94 sqlite_instructions = env_config.get('sqlite_instructions')
95
96 if file_config is not None:
97 logger.info('loading config from file')
98
99 if file_config.get('loglevel') is not None:
100 loglevel = file_config.get('loglevel')
101
102 if file_config.get('logfromothermodules') is not None:
103 log_from_other_modules = file_config.get('logfromothermodules')
104
105 if file_config.get('max_logfiles_num') is not None:
106 max_logfiles_num = file_config.get('max_logfiles_num')
107
108 if file_config.get('max_size_in_mb_all_logs') is not None:
109 max_size_in_mb_all_logs = file_config.get('max_size_in_mb_all_logs')
110
111 if file_config.get('sqlite_instructions') is not None:
112 sqlite_instructions = file_config.get('sqlite_instructions')
113
114 args = [
115 '--data-dir',
116 '/data',
117 '--logfile',
118 '/logs/rotki.log',
119 '--loglevel',
120 loglevel if loglevel is not None else DEFAULT_LOG_LEVEL,
121 ]
122
123 if log_from_other_modules is True:
124 args.append('--logfromothermodules')
125
126 if max_logfiles_num is not None:
127 args.append('--max-logfiles-num')
128 args.append(int(max_logfiles_num))
129
130 if max_size_in_mb_all_logs is not None:
131 args.append('--max-size-in-mb-all-logs')
132 args.append(int(max_size_in_mb_all_logs))
133
134 if sqlite_instructions is not None:
135 args.append('--sqlite-instructions')
136 args.append(int(sqlite_instructions))
137 return args
138
139
140 cleanup_tmp()
141
142 base_args = [
143 '/usr/sbin/rotki',
144 '--rest-api-port',
145 '4242',
146 '--websockets-api-port',
147 '4243',
148 '--api-cors',
149 'http://localhost:*/*,app://.',
150 '--api-host',
151 '0.0.0.0',
152 ]
153
154 config_args = load_config()
155 cmd = base_args + config_args
156
157 logger.info('starting rotki backend')
158
159 rotki = subprocess.Popen(cmd)
160
161 if rotki.returncode == 1:
162 logger.error('Failed to start rotki')
163 exit(1)
164
165 logger.info('starting nginx')
166
167 nginx = subprocess.Popen('nginx -g "daemon off;"', shell=True)
168
169 if nginx.returncode == 1:
170 logger.error('Failed to start nginx')
171 exit(1)
172
173 while True:
174 time.sleep(60)
175
176 if rotki.poll() is not None:
177 logger.error('rotki has terminated exiting')
178 exit(1)
179
180 if nginx.poll() is not None:
181 logger.error('nginx was not running')
182 exit(1)
183
184 logger.info('OK: processes still running')
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packaging/docker/entrypoint.py b/packaging/docker/entrypoint.py
--- a/packaging/docker/entrypoint.py
+++ b/packaging/docker/entrypoint.py
@@ -3,6 +3,7 @@
import logging
import os
import shutil
+from signal import signal, SIGINT, SIGTERM, SIGQUIT
import subprocess
import time
from datetime import datetime, timedelta
@@ -170,6 +171,30 @@
logger.error('Failed to start nginx')
exit(1)
+
+def terminate_process(process_name: str, process: subprocess.Popen) -> None:
+ logger.info(f'Terminating {process_name}')
+ if process.poll() is not None:
+ logger.error(f'{process_name} was not running. This means that some error occurred.')
+ exit(1)
+
+ process.terminate()
+ process.wait() # wait untill the process terminates
+
+
+def graceful_exit(signal, frame):
+ logger.info(f'Received signal {signal}. Exiting gracefully')
+ terminate_process('rotki', rotki)
+ terminate_process('nginx', nginx)
+ exit(0)
+
+
+# Handle exits via ctrl+c or via `docker stop` gracefully
+signal(SIGINT, graceful_exit)
+signal(SIGTERM, graceful_exit)
+signal(SIGQUIT, graceful_exit)
+
+
while True:
time.sleep(60)
| {"golden_diff": "diff --git a/packaging/docker/entrypoint.py b/packaging/docker/entrypoint.py\n--- a/packaging/docker/entrypoint.py\n+++ b/packaging/docker/entrypoint.py\n@@ -3,6 +3,7 @@\n import logging\n import os\n import shutil\n+from signal import signal, SIGINT, SIGTERM, SIGQUIT\n import subprocess\n import time\n from datetime import datetime, timedelta\n@@ -170,6 +171,30 @@\n logger.error('Failed to start nginx')\n exit(1)\n \n+\n+def terminate_process(process_name: str, process: subprocess.Popen) -> None:\n+ logger.info(f'Terminating {process_name}')\n+ if process.poll() is not None:\n+ logger.error(f'{process_name} was not running. This means that some error occurred.')\n+ exit(1)\n+\n+ process.terminate()\n+ process.wait() # wait untill the process terminates\n+\n+\n+def graceful_exit(signal, frame):\n+ logger.info(f'Received signal {signal}. Exiting gracefully')\n+ terminate_process('rotki', rotki)\n+ terminate_process('nginx', nginx)\n+ exit(0)\n+\n+\n+# Handle exits via ctrl+c or via `docker stop` gracefully\n+signal(SIGINT, graceful_exit)\n+signal(SIGTERM, graceful_exit)\n+signal(SIGQUIT, graceful_exit)\n+\n+\n while True:\n time.sleep(60)\n", "issue": "Make sure that Docker image can properly shutdown\n## Problem Definition\r\n\r\nWhile trying to set up the proxy I noticed that the proxy didn't exit cleanly. \r\nInstead, it exited with (code=137). \r\n\r\nThis might be because there is an infinite loop (intentional) monitoring the subprocesses, and this loop only exits if one of the subprocesses stops running.\r\n\r\nThis will cause the script to exit with code 1.\r\n\r\nhttps://github.com/rotki/rotki/blob/53d36bc1f5aa0ff64fcfaf21f0ba6ca005a032eb/packaging/docker/entrypoint.py#L173\r\n\r\n## Requirements\r\n\r\n- Handle `SIGINT`/`SIGTERM` normally and exit cleanly (code=0)\r\n- Make sure that if the subprocesses die we exit (code=1)\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\nimport json\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport time\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\nlogger = logging.getLogger('monitor')\nlogging.basicConfig(level=logging.DEBUG)\n\nDEFAULT_LOG_LEVEL = 'critical'\n\n\ndef can_delete(file: Path, cutoff: int) -> bool:\n return int(os.stat(file).st_mtime) <= cutoff or file.name.startswith('_MEI')\n\n\ndef cleanup_tmp() -> None:\n logger.info('Preparing to cleanup tmp directory')\n tmp_dir = Path('/tmp/').glob('*')\n cache_cutoff = datetime.today() - timedelta(hours=6)\n cutoff_epoch = int(cache_cutoff.strftime(\"%s\"))\n to_delete = filter(lambda x: can_delete(x, cutoff_epoch), tmp_dir)\n\n deleted = 0\n skipped = 0\n\n for item in to_delete:\n path = Path(item)\n if path.is_file():\n try:\n path.unlink()\n deleted += 1\n continue\n except PermissionError:\n skipped += 1\n continue\n\n try:\n shutil.rmtree(item)\n deleted += 1\n except OSError:\n skipped += 1\n continue\n\n logger.info(f'Deleted {deleted} files or directories, skipped {skipped} from /tmp')\n\n\ndef load_config_from_file() -> Optional[Dict[str, Any]]:\n config_file = Path('/config/rotki_config.json')\n if not config_file.exists():\n logger.info('no config file provided')\n return None\n\n with open(config_file) as file:\n try:\n data = json.load(file)\n return data\n except json.JSONDecodeError as e:\n logger.error(e)\n return None\n\n\ndef load_config_from_env() -> Dict[str, Any]:\n loglevel = os.environ.get('LOGLEVEL')\n logfromothermodules = 
os.environ.get('LOGFROMOTHERMODDULES')\n max_size_in_mb_all_logs = os.environ.get('MAX_SIZE_IN_MB_ALL_LOGS')\n max_logfiles_num = os.environ.get('MAX_LOGFILES_NUM')\n sqlite_instructions = os.environ.get('SQLITE_INSTRUCTIONS')\n\n return {\n 'loglevel': loglevel,\n 'logfromothermodules': logfromothermodules,\n 'max_logfiles_num': max_logfiles_num,\n 'max_size_in_mb_all_logs': max_size_in_mb_all_logs,\n 'sqlite_instructions': sqlite_instructions,\n }\n\n\ndef load_config() -> List[str]:\n env_config = load_config_from_env()\n file_config = load_config_from_file()\n\n logger.info('loading config from env')\n\n loglevel = env_config.get('loglevel')\n log_from_other_modules = env_config.get('logfromothermodules')\n max_logfiles_num = env_config.get('max_logfiles_num')\n max_size_in_mb_all_logs = env_config.get('max_size_in_mb_all_logs')\n sqlite_instructions = env_config.get('sqlite_instructions')\n\n if file_config is not None:\n logger.info('loading config from file')\n\n if file_config.get('loglevel') is not None:\n loglevel = file_config.get('loglevel')\n\n if file_config.get('logfromothermodules') is not None:\n log_from_other_modules = file_config.get('logfromothermodules')\n\n if file_config.get('max_logfiles_num') is not None:\n max_logfiles_num = file_config.get('max_logfiles_num')\n\n if file_config.get('max_size_in_mb_all_logs') is not None:\n max_size_in_mb_all_logs = file_config.get('max_size_in_mb_all_logs')\n\n if file_config.get('sqlite_instructions') is not None:\n sqlite_instructions = file_config.get('sqlite_instructions')\n\n args = [\n '--data-dir',\n '/data',\n '--logfile',\n '/logs/rotki.log',\n '--loglevel',\n loglevel if loglevel is not None else DEFAULT_LOG_LEVEL,\n ]\n\n if log_from_other_modules is True:\n args.append('--logfromothermodules')\n\n if max_logfiles_num is not None:\n args.append('--max-logfiles-num')\n args.append(int(max_logfiles_num))\n\n if max_size_in_mb_all_logs is not None:\n args.append('--max-size-in-mb-all-logs')\n args.append(int(max_size_in_mb_all_logs))\n\n if sqlite_instructions is not None:\n args.append('--sqlite-instructions')\n args.append(int(sqlite_instructions))\n return args\n\n\ncleanup_tmp()\n\nbase_args = [\n '/usr/sbin/rotki',\n '--rest-api-port',\n '4242',\n '--websockets-api-port',\n '4243',\n '--api-cors',\n 'http://localhost:*/*,app://.',\n '--api-host',\n '0.0.0.0',\n]\n\nconfig_args = load_config()\ncmd = base_args + config_args\n\nlogger.info('starting rotki backend')\n\nrotki = subprocess.Popen(cmd)\n\nif rotki.returncode == 1:\n logger.error('Failed to start rotki')\n exit(1)\n\nlogger.info('starting nginx')\n\nnginx = subprocess.Popen('nginx -g \"daemon off;\"', shell=True)\n\nif nginx.returncode == 1:\n logger.error('Failed to start nginx')\n exit(1)\n\nwhile True:\n time.sleep(60)\n\n if rotki.poll() is not None:\n logger.error('rotki has terminated exiting')\n exit(1)\n\n if nginx.poll() is not None:\n logger.error('nginx was not running')\n exit(1)\n\n logger.info('OK: processes still running')\n", "path": "packaging/docker/entrypoint.py"}], "after_files": [{"content": "#!/usr/bin/python3\nimport json\nimport logging\nimport os\nimport shutil\nfrom signal import signal, SIGINT, SIGTERM, SIGQUIT\nimport subprocess\nimport time\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\nlogger = logging.getLogger('monitor')\nlogging.basicConfig(level=logging.DEBUG)\n\nDEFAULT_LOG_LEVEL = 'critical'\n\n\ndef can_delete(file: Path, cutoff: int) -> bool:\n return 
int(os.stat(file).st_mtime) <= cutoff or file.name.startswith('_MEI')\n\n\ndef cleanup_tmp() -> None:\n logger.info('Preparing to cleanup tmp directory')\n tmp_dir = Path('/tmp/').glob('*')\n cache_cutoff = datetime.today() - timedelta(hours=6)\n cutoff_epoch = int(cache_cutoff.strftime(\"%s\"))\n to_delete = filter(lambda x: can_delete(x, cutoff_epoch), tmp_dir)\n\n deleted = 0\n skipped = 0\n\n for item in to_delete:\n path = Path(item)\n if path.is_file():\n try:\n path.unlink()\n deleted += 1\n continue\n except PermissionError:\n skipped += 1\n continue\n\n try:\n shutil.rmtree(item)\n deleted += 1\n except OSError:\n skipped += 1\n continue\n\n logger.info(f'Deleted {deleted} files or directories, skipped {skipped} from /tmp')\n\n\ndef load_config_from_file() -> Optional[Dict[str, Any]]:\n config_file = Path('/config/rotki_config.json')\n if not config_file.exists():\n logger.info('no config file provided')\n return None\n\n with open(config_file) as file:\n try:\n data = json.load(file)\n return data\n except json.JSONDecodeError as e:\n logger.error(e)\n return None\n\n\ndef load_config_from_env() -> Dict[str, Any]:\n loglevel = os.environ.get('LOGLEVEL')\n logfromothermodules = os.environ.get('LOGFROMOTHERMODDULES')\n max_size_in_mb_all_logs = os.environ.get('MAX_SIZE_IN_MB_ALL_LOGS')\n max_logfiles_num = os.environ.get('MAX_LOGFILES_NUM')\n sqlite_instructions = os.environ.get('SQLITE_INSTRUCTIONS')\n\n return {\n 'loglevel': loglevel,\n 'logfromothermodules': logfromothermodules,\n 'max_logfiles_num': max_logfiles_num,\n 'max_size_in_mb_all_logs': max_size_in_mb_all_logs,\n 'sqlite_instructions': sqlite_instructions,\n }\n\n\ndef load_config() -> List[str]:\n env_config = load_config_from_env()\n file_config = load_config_from_file()\n\n logger.info('loading config from env')\n\n loglevel = env_config.get('loglevel')\n log_from_other_modules = env_config.get('logfromothermodules')\n max_logfiles_num = env_config.get('max_logfiles_num')\n max_size_in_mb_all_logs = env_config.get('max_size_in_mb_all_logs')\n sqlite_instructions = env_config.get('sqlite_instructions')\n\n if file_config is not None:\n logger.info('loading config from file')\n\n if file_config.get('loglevel') is not None:\n loglevel = file_config.get('loglevel')\n\n if file_config.get('logfromothermodules') is not None:\n log_from_other_modules = file_config.get('logfromothermodules')\n\n if file_config.get('max_logfiles_num') is not None:\n max_logfiles_num = file_config.get('max_logfiles_num')\n\n if file_config.get('max_size_in_mb_all_logs') is not None:\n max_size_in_mb_all_logs = file_config.get('max_size_in_mb_all_logs')\n\n if file_config.get('sqlite_instructions') is not None:\n sqlite_instructions = file_config.get('sqlite_instructions')\n\n args = [\n '--data-dir',\n '/data',\n '--logfile',\n '/logs/rotki.log',\n '--loglevel',\n loglevel if loglevel is not None else DEFAULT_LOG_LEVEL,\n ]\n\n if log_from_other_modules is True:\n args.append('--logfromothermodules')\n\n if max_logfiles_num is not None:\n args.append('--max-logfiles-num')\n args.append(int(max_logfiles_num))\n\n if max_size_in_mb_all_logs is not None:\n args.append('--max-size-in-mb-all-logs')\n args.append(int(max_size_in_mb_all_logs))\n\n if sqlite_instructions is not None:\n args.append('--sqlite-instructions')\n args.append(int(sqlite_instructions))\n return args\n\n\ncleanup_tmp()\n\nbase_args = [\n '/usr/sbin/rotki',\n '--rest-api-port',\n '4242',\n '--websockets-api-port',\n '4243',\n '--api-cors',\n 'http://localhost:*/*,app://.',\n 
'--api-host',\n '0.0.0.0',\n]\n\nconfig_args = load_config()\ncmd = base_args + config_args\n\nlogger.info('starting rotki backend')\n\nrotki = subprocess.Popen(cmd)\n\nif rotki.returncode == 1:\n logger.error('Failed to start rotki')\n exit(1)\n\nlogger.info('starting nginx')\n\nnginx = subprocess.Popen('nginx -g \"daemon off;\"', shell=True)\n\nif nginx.returncode == 1:\n logger.error('Failed to start nginx')\n exit(1)\n\n\ndef terminate_process(process_name: str, process: subprocess.Popen) -> None:\n logger.info(f'Terminating {process_name}')\n if process.poll() is not None:\n logger.error(f'{process_name} was not running. This means that some error occurred.')\n exit(1)\n\n process.terminate()\n process.wait() # wait untill the process terminates\n\n\ndef graceful_exit(signal, frame):\n logger.info(f'Received signal {signal}. Exiting gracefully')\n terminate_process('rotki', rotki)\n terminate_process('nginx', nginx)\n exit(0)\n\n\n# Handle exits via ctrl+c or via `docker stop` gracefully\nsignal(SIGINT, graceful_exit)\nsignal(SIGTERM, graceful_exit)\nsignal(SIGQUIT, graceful_exit)\n\n\nwhile True:\n time.sleep(60)\n\n if rotki.poll() is not None:\n logger.error('rotki has terminated exiting')\n exit(1)\n\n if nginx.poll() is not None:\n logger.error('nginx was not running')\n exit(1)\n\n logger.info('OK: processes still running')\n", "path": "packaging/docker/entrypoint.py"}]} | 2,125 | 309 |
gh_patches_debug_1724 | rasdani/github-patches | git_diff | translate__pootle-5621 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stats are wrong for children where the child name is repeated in the childs descendants
this is causing stats to foo
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_data/directory_data.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.db.models import Max
10
11 from pootle_translationproject.models import TranslationProject
12
13 from .utils import RelatedStoresDataTool
14
15
16 class DirectoryDataTool(RelatedStoresDataTool):
17 """Retrieves aggregate stats for a Directory"""
18
19 group_by = ("store__parent__tp_path", )
20 cache_key_name = "directory"
21
22 @property
23 def context_name(self):
24 return self.context.pootle_path
25
26 @property
27 def max_unit_revision(self):
28 try:
29 return self.context.translationproject.data_tool.max_unit_revision
30 except TranslationProject.DoesNotExist:
31 return self.all_stat_data.aggregate(rev=Max("max_unit_revision"))["rev"]
32
33 def filter_data(self, qs):
34 return (
35 qs.filter(
36 store__translation_project=self.context.translation_project,
37 store__parent__tp_path__startswith=self.context.tp_path)
38 .exclude(store__parent=self.context))
39
40 def get_children_stats(self, qs):
41 children = {}
42 for child in qs.iterator():
43 self.add_child_stats(children, child)
44 child_stores = self.data_model.filter(store__parent=self.context).values(
45 *("store__name", ) + self.max_fields + self.sum_fields)
46 for child in child_stores:
47 self.add_child_stats(
48 children,
49 child,
50 root=child["store__name"],
51 use_aggregates=False)
52 self.add_submission_info(self.stat_data, children)
53 self.add_last_created_info(child_stores, children)
54 return children
55
56 def get_root_child_path(self, child):
57 return (
58 child["store__parent__tp_path"].replace(
59 self.context.tp_path, "").split("/")[0])
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_data/directory_data.py b/pootle/apps/pootle_data/directory_data.py
--- a/pootle/apps/pootle_data/directory_data.py
+++ b/pootle/apps/pootle_data/directory_data.py
@@ -54,6 +54,5 @@
return children
def get_root_child_path(self, child):
- return (
- child["store__parent__tp_path"].replace(
- self.context.tp_path, "").split("/")[0])
+ return child["store__parent__tp_path"][
+ len(self.context.tp_path):].split("/")[0]
| {"golden_diff": "diff --git a/pootle/apps/pootle_data/directory_data.py b/pootle/apps/pootle_data/directory_data.py\n--- a/pootle/apps/pootle_data/directory_data.py\n+++ b/pootle/apps/pootle_data/directory_data.py\n@@ -54,6 +54,5 @@\n return children\n \n def get_root_child_path(self, child):\n- return (\n- child[\"store__parent__tp_path\"].replace(\n- self.context.tp_path, \"\").split(\"/\")[0])\n+ return child[\"store__parent__tp_path\"][\n+ len(self.context.tp_path):].split(\"/\")[0]\n", "issue": "Stats are wrong for children where the child name is repeated in the childs descendants\nthis is causing stats to foo\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models import Max\n\nfrom pootle_translationproject.models import TranslationProject\n\nfrom .utils import RelatedStoresDataTool\n\n\nclass DirectoryDataTool(RelatedStoresDataTool):\n \"\"\"Retrieves aggregate stats for a Directory\"\"\"\n\n group_by = (\"store__parent__tp_path\", )\n cache_key_name = \"directory\"\n\n @property\n def context_name(self):\n return self.context.pootle_path\n\n @property\n def max_unit_revision(self):\n try:\n return self.context.translationproject.data_tool.max_unit_revision\n except TranslationProject.DoesNotExist:\n return self.all_stat_data.aggregate(rev=Max(\"max_unit_revision\"))[\"rev\"]\n\n def filter_data(self, qs):\n return (\n qs.filter(\n store__translation_project=self.context.translation_project,\n store__parent__tp_path__startswith=self.context.tp_path)\n .exclude(store__parent=self.context))\n\n def get_children_stats(self, qs):\n children = {}\n for child in qs.iterator():\n self.add_child_stats(children, child)\n child_stores = self.data_model.filter(store__parent=self.context).values(\n *(\"store__name\", ) + self.max_fields + self.sum_fields)\n for child in child_stores:\n self.add_child_stats(\n children,\n child,\n root=child[\"store__name\"],\n use_aggregates=False)\n self.add_submission_info(self.stat_data, children)\n self.add_last_created_info(child_stores, children)\n return children\n\n def get_root_child_path(self, child):\n return (\n child[\"store__parent__tp_path\"].replace(\n self.context.tp_path, \"\").split(\"/\")[0])\n", "path": "pootle/apps/pootle_data/directory_data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models import Max\n\nfrom pootle_translationproject.models import TranslationProject\n\nfrom .utils import RelatedStoresDataTool\n\n\nclass DirectoryDataTool(RelatedStoresDataTool):\n \"\"\"Retrieves aggregate stats for a Directory\"\"\"\n\n group_by = (\"store__parent__tp_path\", )\n cache_key_name = \"directory\"\n\n @property\n def context_name(self):\n return self.context.pootle_path\n\n @property\n def max_unit_revision(self):\n try:\n return self.context.translationproject.data_tool.max_unit_revision\n except TranslationProject.DoesNotExist:\n return self.all_stat_data.aggregate(rev=Max(\"max_unit_revision\"))[\"rev\"]\n\n def filter_data(self, qs):\n return (\n qs.filter(\n store__translation_project=self.context.translation_project,\n store__parent__tp_path__startswith=self.context.tp_path)\n .exclude(store__parent=self.context))\n\n def get_children_stats(self, qs):\n children = {}\n for child in qs.iterator():\n self.add_child_stats(children, child)\n child_stores = self.data_model.filter(store__parent=self.context).values(\n *(\"store__name\", ) + self.max_fields + self.sum_fields)\n for child in child_stores:\n self.add_child_stats(\n children,\n child,\n root=child[\"store__name\"],\n use_aggregates=False)\n self.add_submission_info(self.stat_data, children)\n self.add_last_created_info(child_stores, children)\n return children\n\n def get_root_child_path(self, child):\n return child[\"store__parent__tp_path\"][\n len(self.context.tp_path):].split(\"/\")[0]\n", "path": "pootle/apps/pootle_data/directory_data.py"}]} | 831 | 143 |
gh_patches_debug_8324 | rasdani/github-patches | git_diff | mdn__kuma-7849 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add country geo information to /api/v1/whoami
We can use this immediately to be able to determine if a user should be seeing certain banners in Yari.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/api/v1/views.py`
Content:
```
1 import json
2 import os
3 from datetime import datetime
4 from urllib.parse import urlparse
5
6 import stripe
7 from django.conf import settings
8 from django.contrib.auth import get_user_model
9 from django.http import (
10 HttpResponse,
11 HttpResponseBadRequest,
12 HttpResponseForbidden,
13 JsonResponse,
14 )
15 from django.utils import translation
16 from django.views.decorators.cache import never_cache
17 from django.views.decorators.csrf import csrf_exempt
18 from django.views.decorators.http import require_GET, require_POST
19 from raven.contrib.django.models import client as raven_client
20 from rest_framework import status
21 from rest_framework.decorators import api_view
22 from rest_framework.permissions import IsAuthenticated
23 from rest_framework.renderers import JSONRenderer
24 from rest_framework.response import Response
25 from rest_framework.views import APIView
26 from waffle import flag_is_active
27 from waffle.decorators import waffle_flag
28 from waffle.models import Flag, Switch
29
30 from kuma.api.v1.forms import AccountSettingsForm
31 from kuma.api.v1.serializers import UserDetailsSerializer
32 from kuma.core.email_utils import render_email
33 from kuma.core.ga_tracking import (
34 ACTION_SUBSCRIPTION_CANCELED,
35 ACTION_SUBSCRIPTION_CREATED,
36 ACTION_SUBSCRIPTION_FEEDBACK,
37 CATEGORY_MONTHLY_PAYMENTS,
38 track_event,
39 )
40 from kuma.core.urlresolvers import reverse
41 from kuma.core.utils import requests_retry_session, send_mail_retrying
42 from kuma.users.models import User, UserSubscription
43 from kuma.users.newsletter.utils import refresh_is_user_newsletter_subscribed
44 from kuma.users.signals import (
45 newsletter_subscribed,
46 newsletter_unsubscribed,
47 username_changed,
48 )
49 from kuma.users.stripe_utils import (
50 cancel_stripe_customer_subscriptions,
51 create_stripe_customer_and_subscription_for_user,
52 retrieve_and_synchronize_subscription_info,
53 )
54 from kuma.users.templatetags.jinja_helpers import get_avatar_url
55 from kuma.wiki.templatetags.jinja_helpers import absolutify
56
57
58 @never_cache
59 @require_GET
60 def whoami(request):
61 """
62 Return a JSON object representing the current user, either
63 authenticated or anonymous.
64 """
65 user = request.user
66 if user.is_authenticated:
67 data = {
68 "username": user.username,
69 "is_authenticated": True,
70 "avatar_url": get_avatar_url(user),
71 "email": user.email,
72 "subscriber_number": user.subscriber_number,
73 }
74 if UserSubscription.objects.filter(user=user, canceled__isnull=True).exists():
75 data["is_subscriber"] = True
76 if user.is_staff:
77 data["is_staff"] = True
78 if user.is_superuser:
79 data["is_superuser"] = True
80 if user.is_beta_tester:
81 data["is_beta_tester"] = True
82 else:
83 data = {}
84
85 data["waffle"] = {
86 "flags": {},
87 "switches": {s.name: True for s in Switch.get_all() if s.is_active()},
88 }
89 # Specifically and more smartly loop over the waffle Flag objects
90 # to avoid unnecessary `cache.get(...)` calls within the `flag.is_active(request)`.
91 for flag in Flag.get_all():
92 if not request.user.is_authenticated:
93 # Majority of users are anonymous, so let's focus on that.
94 # Let's see if there's a quick reason to bail the
95 # expensive `flag.is_active(request)` call.
96 if (
97 flag.authenticated or flag.staff or flag.superusers
98 ) and not flag.everyone:
99 continue
100 if not (flag.languages or flag.percent or flag.everyone):
101 continue
102 if flag.languages:
103 languages = [ln.strip() for ln in flag.languages.split(",")]
104 if (
105 not hasattr(request, "LANGUAGE_CODE")
106 or request.LANGUAGE_CODE not in languages
107 ):
108 continue
109
110 if flag.is_active(request):
111 data["waffle"]["flags"][flag.name] = True
112
113 return JsonResponse(data)
114
115
116 @never_cache
117 def account_settings(request):
118 user = request.user
119 if not user.is_authenticated:
120 return HttpResponseForbidden("not signed in")
121 if request.method == "DELETE":
122 # This should cease to be necessary once we get rid of the Wiki models.
123 anon, _ = User.objects.get_or_create(username="Anonymous")
124 user.revisionakismetsubmission_set.update(sender=anon)
125 user.documentdeletionlog_set.update(user=anon)
126 user.documentspamattempt_set.update(user=anon)
127 user.documentspam_reviewed.update(reviewer=anon)
128 user.created_revisions.update(creator=anon)
129 user.created_attachment_revisions.update(creator=anon)
130 user.bans.update(user=anon)
131 user.bans_issued.update(by=anon)
132
133 user.delete()
134 return JsonResponse({"deleted": True})
135 elif request.method == "POST":
136 form = AccountSettingsForm(request.POST)
137 if not form.is_valid():
138 return JsonResponse({"errors": form.errors.get_json_data()}, status=400)
139
140 set_locale = None
141 if form.cleaned_data.get("locale"):
142 user.locale = set_locale = form.cleaned_data["locale"]
143 user.save()
144
145 response = JsonResponse({"ok": True})
146 if set_locale:
147 response.set_cookie(
148 key=settings.LANGUAGE_COOKIE_NAME,
149 value=set_locale,
150 max_age=settings.LANGUAGE_COOKIE_AGE,
151 path=settings.LANGUAGE_COOKIE_PATH,
152 domain=settings.LANGUAGE_COOKIE_DOMAIN,
153 secure=settings.LANGUAGE_COOKIE_SECURE,
154 )
155
156 return response
157
158 context = {
159 "csrfmiddlewaretoken": request.META.get("CSRF_COOKIE"),
160 "locale": user.locale,
161 }
162 return JsonResponse(context)
163
164
165 @waffle_flag("subscription")
166 @never_cache
167 @require_POST
168 def send_subscriptions_feedback(request):
169 """
170 Sends feedback to Google Analytics. This is done on the
171 backend to ensure that all feedback is collected, even
172 from users with DNT or where GA is disabled.
173 """
174 data = json.loads(request.body)
175 feedback = (data.get("feedback") or "").strip()
176
177 if not feedback:
178 return HttpResponseBadRequest("no feedback")
179
180 track_event(
181 CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_FEEDBACK, data["feedback"]
182 )
183 return HttpResponse(status=204)
184
185
186 @api_view(["POST", "GET", "DELETE"])
187 @never_cache
188 def subscriptions(request):
189 if not request.user.is_authenticated or not flag_is_active(request, "subscription"):
190 return Response(None, status=status.HTTP_403_FORBIDDEN)
191
192 if request.method == "POST":
193 create_stripe_customer_and_subscription_for_user(
194 request.user, request.user.email, request.data["stripe_token"]
195 )
196 return Response(None, status=status.HTTP_201_CREATED)
197 elif request.method == "DELETE":
198 cancelled = cancel_stripe_customer_subscriptions(request.user)
199 if cancelled:
200 return Response(None, status=status.HTTP_204_NO_CONTENT)
201 else:
202 return Response("nothing to cancel", status=status.HTTP_410_GONE)
203
204 all_subscriptions = []
205 subscription_info = retrieve_and_synchronize_subscription_info(request.user)
206 if subscription_info:
207 all_subscriptions.append(subscription_info)
208
209 return Response({"subscriptions": all_subscriptions})
210
211
212 @csrf_exempt
213 @require_POST
214 @never_cache
215 def stripe_hooks(request):
216 try:
217 payload = json.loads(request.body)
218 except ValueError:
219 return HttpResponseBadRequest("Invalid JSON payload")
220
221 try:
222 event = stripe.Event.construct_from(payload, stripe.api_key)
223 except stripe.error.StripeError:
224 raven_client.captureException()
225 return HttpResponseBadRequest()
226
227 # Generally, for this list of if-statements, see the create_missing_stripe_webhook
228 # function.
229 # The list of events there ought to at least minimally match what we're prepared
230 # to deal with here.
231
232 if event.type == "invoice.payment_succeeded":
233 invoice = event.data.object
234 _send_payment_received_email(invoice, request.LANGUAGE_CODE)
235 track_event(
236 CATEGORY_MONTHLY_PAYMENTS,
237 ACTION_SUBSCRIPTION_CREATED,
238 f"{settings.CONTRIBUTION_AMOUNT_USD:.2f}",
239 )
240
241 elif event.type == "customer.subscription.deleted":
242 obj = event.data.object
243 for user in User.objects.filter(stripe_customer_id=obj.customer):
244 UserSubscription.set_canceled(user, obj.id)
245 track_event(CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_CANCELED, "webhook")
246
247 else:
248 return HttpResponseBadRequest(
249 f"We did not expect a Stripe webhook of type {event.type!r}"
250 )
251
252 return HttpResponse()
253
254
255 def _send_payment_received_email(invoice, locale):
256 user = get_user_model().objects.get(stripe_customer_id=invoice.customer)
257 subscription_info = retrieve_and_synchronize_subscription_info(user)
258 locale = locale or settings.WIKI_DEFAULT_LANGUAGE
259 context = {
260 "payment_date": datetime.fromtimestamp(invoice.created),
261 "next_payment_date": subscription_info["next_payment_at"],
262 "invoice_number": invoice.number,
263 "cost": invoice.total / 100,
264 "credit_card_brand": subscription_info["brand"],
265 "manage_subscription_url": absolutify(reverse("payment_management")),
266 "faq_url": absolutify(reverse("payments_index")),
267 "contact_email": settings.CONTRIBUTION_SUPPORT_EMAIL,
268 }
269 with translation.override(locale):
270 subject = render_email("users/email/payment_received/subject.ltxt", context)
271 # Email subject *must not* contain newlines
272 subject = "".join(subject.splitlines())
273 plain = render_email("users/email/payment_received/plain.ltxt", context)
274
275 send_mail_retrying(
276 subject,
277 plain,
278 settings.DEFAULT_FROM_EMAIL,
279 [user.email],
280 attachment={
281 "name": os.path.basename(urlparse(invoice.invoice_pdf).path),
282 "bytes": _download_from_url(invoice.invoice_pdf),
283 "mime": "application/pdf",
284 },
285 )
286
287
288 def _download_from_url(url):
289 pdf_download = requests_retry_session().get(url)
290 pdf_download.raise_for_status()
291 return pdf_download.content
292
293
294 class APIUserDetailsView(APIView):
295 http_method_names = ["get", "put"]
296 serializer_class = UserDetailsSerializer
297 renderer_classes = [JSONRenderer]
298 permission_classes = [IsAuthenticated]
299
300 def get(self, request, format=None):
301 assert request.user.is_authenticated
302 serializer = UserDetailsSerializer(request.user, many=False)
303 return Response(serializer.data)
304
305 def put(self, request, format=None):
306 user = request.user
307 serializer = UserDetailsSerializer(instance=user, data=request.data)
308 if serializer.is_valid():
309 was_subscribed = user.is_newsletter_subscribed
310 old_username = user.username
311 serializer.save(user=user)
312
313 if not was_subscribed and user.is_newsletter_subscribed:
314 newsletter_subscribed.send(None, user=user)
315 if was_subscribed and not user.is_newsletter_subscribed:
316 newsletter_unsubscribed.send(None, user=user)
317
318 if old_username != user.username:
319 username_changed.send(None, user=user)
320
321 return Response(serializer.data, status=status.HTTP_200_OK)
322 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
323
324
325 user_details = never_cache(APIUserDetailsView.as_view())
326
327
328 @csrf_exempt
329 @require_POST
330 @never_cache
331 def sendinblue_hooks(request):
332 # Sendinblue does not sign its webhook requests, hence the event handlers following
333 # are different from the Stripe ones, in that they treat the event as a notification
334 # of a _potential_ change, while still needing to contact sendinblue to verify that
335 # it actually happened.
336 try:
337 payload = json.loads(request.body)
338 event = payload["event"]
339 email = payload["email"]
340 except (json.decoder.JSONDecodeError, KeyError) as exception:
341 return HttpResponseBadRequest(
342 f"{exception.__class__.__name__} on {request.body}"
343 )
344
345 if event == "unsubscribe":
346 refresh_is_user_newsletter_subscribed(email)
347 return HttpResponse()
348 else:
349 return HttpResponseBadRequest(
350 f"We did not expect a Sendinblue webhook of type {event['event']!r}"
351 )
352
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/api/v1/views.py b/kuma/api/v1/views.py
--- a/kuma/api/v1/views.py
+++ b/kuma/api/v1/views.py
@@ -82,6 +82,15 @@
else:
data = {}
+ geo = {}
+ # https://aws.amazon.com/about-aws/whats-new/2020/07/cloudfront-geolocation-headers/
+ cloudfront_country_header = "HTTP_CLOUDFRONT_VIEWER_COUNTRY_NAME"
+ cloudfront_country_value = request.META.get(cloudfront_country_header)
+ if cloudfront_country_value:
+ geo["country"] = cloudfront_country_value
+ if geo:
+ data["geo"] = geo
+
data["waffle"] = {
"flags": {},
"switches": {s.name: True for s in Switch.get_all() if s.is_active()},
| {"golden_diff": "diff --git a/kuma/api/v1/views.py b/kuma/api/v1/views.py\n--- a/kuma/api/v1/views.py\n+++ b/kuma/api/v1/views.py\n@@ -82,6 +82,15 @@\n else:\n data = {}\n \n+ geo = {}\n+ # https://aws.amazon.com/about-aws/whats-new/2020/07/cloudfront-geolocation-headers/\n+ cloudfront_country_header = \"HTTP_CLOUDFRONT_VIEWER_COUNTRY_NAME\"\n+ cloudfront_country_value = request.META.get(cloudfront_country_header)\n+ if cloudfront_country_value:\n+ geo[\"country\"] = cloudfront_country_value\n+ if geo:\n+ data[\"geo\"] = geo\n+\n data[\"waffle\"] = {\n \"flags\": {},\n \"switches\": {s.name: True for s in Switch.get_all() if s.is_active()},\n", "issue": "Add country geo information to /api/v1/whoami\nWe can use this immediately to be able to determine if a user should be seeing certain banners in Yari. \n", "before_files": [{"content": "import json\nimport os\nfrom datetime import datetime\nfrom urllib.parse import urlparse\n\nimport stripe\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import (\n HttpResponse,\n HttpResponseBadRequest,\n HttpResponseForbidden,\n JsonResponse,\n)\nfrom django.utils import translation\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET, require_POST\nfrom raven.contrib.django.models import client as raven_client\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom waffle import flag_is_active\nfrom waffle.decorators import waffle_flag\nfrom waffle.models import Flag, Switch\n\nfrom kuma.api.v1.forms import AccountSettingsForm\nfrom kuma.api.v1.serializers import UserDetailsSerializer\nfrom kuma.core.email_utils import render_email\nfrom kuma.core.ga_tracking import (\n ACTION_SUBSCRIPTION_CANCELED,\n ACTION_SUBSCRIPTION_CREATED,\n ACTION_SUBSCRIPTION_FEEDBACK,\n CATEGORY_MONTHLY_PAYMENTS,\n track_event,\n)\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import requests_retry_session, send_mail_retrying\nfrom kuma.users.models import User, UserSubscription\nfrom kuma.users.newsletter.utils import refresh_is_user_newsletter_subscribed\nfrom kuma.users.signals import (\n newsletter_subscribed,\n newsletter_unsubscribed,\n username_changed,\n)\nfrom kuma.users.stripe_utils import (\n cancel_stripe_customer_subscriptions,\n create_stripe_customer_and_subscription_for_user,\n retrieve_and_synchronize_subscription_info,\n)\nfrom kuma.users.templatetags.jinja_helpers import get_avatar_url\nfrom kuma.wiki.templatetags.jinja_helpers import absolutify\n\n\n@never_cache\n@require_GET\ndef whoami(request):\n \"\"\"\n Return a JSON object representing the current user, either\n authenticated or anonymous.\n \"\"\"\n user = request.user\n if user.is_authenticated:\n data = {\n \"username\": user.username,\n \"is_authenticated\": True,\n \"avatar_url\": get_avatar_url(user),\n \"email\": user.email,\n \"subscriber_number\": user.subscriber_number,\n }\n if UserSubscription.objects.filter(user=user, canceled__isnull=True).exists():\n data[\"is_subscriber\"] = True\n if user.is_staff:\n data[\"is_staff\"] = True\n if user.is_superuser:\n data[\"is_superuser\"] = True\n if user.is_beta_tester:\n data[\"is_beta_tester\"] = True\n else:\n data = {}\n\n data[\"waffle\"] = 
{\n \"flags\": {},\n \"switches\": {s.name: True for s in Switch.get_all() if s.is_active()},\n }\n # Specifically and more smartly loop over the waffle Flag objects\n # to avoid unnecessary `cache.get(...)` calls within the `flag.is_active(request)`.\n for flag in Flag.get_all():\n if not request.user.is_authenticated:\n # Majority of users are anonymous, so let's focus on that.\n # Let's see if there's a quick reason to bail the\n # expensive `flag.is_active(request)` call.\n if (\n flag.authenticated or flag.staff or flag.superusers\n ) and not flag.everyone:\n continue\n if not (flag.languages or flag.percent or flag.everyone):\n continue\n if flag.languages:\n languages = [ln.strip() for ln in flag.languages.split(\",\")]\n if (\n not hasattr(request, \"LANGUAGE_CODE\")\n or request.LANGUAGE_CODE not in languages\n ):\n continue\n\n if flag.is_active(request):\n data[\"waffle\"][\"flags\"][flag.name] = True\n\n return JsonResponse(data)\n\n\n@never_cache\ndef account_settings(request):\n user = request.user\n if not user.is_authenticated:\n return HttpResponseForbidden(\"not signed in\")\n if request.method == \"DELETE\":\n # This should cease to be necessary once we get rid of the Wiki models.\n anon, _ = User.objects.get_or_create(username=\"Anonymous\")\n user.revisionakismetsubmission_set.update(sender=anon)\n user.documentdeletionlog_set.update(user=anon)\n user.documentspamattempt_set.update(user=anon)\n user.documentspam_reviewed.update(reviewer=anon)\n user.created_revisions.update(creator=anon)\n user.created_attachment_revisions.update(creator=anon)\n user.bans.update(user=anon)\n user.bans_issued.update(by=anon)\n\n user.delete()\n return JsonResponse({\"deleted\": True})\n elif request.method == \"POST\":\n form = AccountSettingsForm(request.POST)\n if not form.is_valid():\n return JsonResponse({\"errors\": form.errors.get_json_data()}, status=400)\n\n set_locale = None\n if form.cleaned_data.get(\"locale\"):\n user.locale = set_locale = form.cleaned_data[\"locale\"]\n user.save()\n\n response = JsonResponse({\"ok\": True})\n if set_locale:\n response.set_cookie(\n key=settings.LANGUAGE_COOKIE_NAME,\n value=set_locale,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN,\n secure=settings.LANGUAGE_COOKIE_SECURE,\n )\n\n return response\n\n context = {\n \"csrfmiddlewaretoken\": request.META.get(\"CSRF_COOKIE\"),\n \"locale\": user.locale,\n }\n return JsonResponse(context)\n\n\n@waffle_flag(\"subscription\")\n@never_cache\n@require_POST\ndef send_subscriptions_feedback(request):\n \"\"\"\n Sends feedback to Google Analytics. 
This is done on the\n backend to ensure that all feedback is collected, even\n from users with DNT or where GA is disabled.\n \"\"\"\n data = json.loads(request.body)\n feedback = (data.get(\"feedback\") or \"\").strip()\n\n if not feedback:\n return HttpResponseBadRequest(\"no feedback\")\n\n track_event(\n CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_FEEDBACK, data[\"feedback\"]\n )\n return HttpResponse(status=204)\n\n\n@api_view([\"POST\", \"GET\", \"DELETE\"])\n@never_cache\ndef subscriptions(request):\n if not request.user.is_authenticated or not flag_is_active(request, \"subscription\"):\n return Response(None, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == \"POST\":\n create_stripe_customer_and_subscription_for_user(\n request.user, request.user.email, request.data[\"stripe_token\"]\n )\n return Response(None, status=status.HTTP_201_CREATED)\n elif request.method == \"DELETE\":\n cancelled = cancel_stripe_customer_subscriptions(request.user)\n if cancelled:\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(\"nothing to cancel\", status=status.HTTP_410_GONE)\n\n all_subscriptions = []\n subscription_info = retrieve_and_synchronize_subscription_info(request.user)\n if subscription_info:\n all_subscriptions.append(subscription_info)\n\n return Response({\"subscriptions\": all_subscriptions})\n\n\n@csrf_exempt\n@require_POST\n@never_cache\ndef stripe_hooks(request):\n try:\n payload = json.loads(request.body)\n except ValueError:\n return HttpResponseBadRequest(\"Invalid JSON payload\")\n\n try:\n event = stripe.Event.construct_from(payload, stripe.api_key)\n except stripe.error.StripeError:\n raven_client.captureException()\n return HttpResponseBadRequest()\n\n # Generally, for this list of if-statements, see the create_missing_stripe_webhook\n # function.\n # The list of events there ought to at least minimally match what we're prepared\n # to deal with here.\n\n if event.type == \"invoice.payment_succeeded\":\n invoice = event.data.object\n _send_payment_received_email(invoice, request.LANGUAGE_CODE)\n track_event(\n CATEGORY_MONTHLY_PAYMENTS,\n ACTION_SUBSCRIPTION_CREATED,\n f\"{settings.CONTRIBUTION_AMOUNT_USD:.2f}\",\n )\n\n elif event.type == \"customer.subscription.deleted\":\n obj = event.data.object\n for user in User.objects.filter(stripe_customer_id=obj.customer):\n UserSubscription.set_canceled(user, obj.id)\n track_event(CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_CANCELED, \"webhook\")\n\n else:\n return HttpResponseBadRequest(\n f\"We did not expect a Stripe webhook of type {event.type!r}\"\n )\n\n return HttpResponse()\n\n\ndef _send_payment_received_email(invoice, locale):\n user = get_user_model().objects.get(stripe_customer_id=invoice.customer)\n subscription_info = retrieve_and_synchronize_subscription_info(user)\n locale = locale or settings.WIKI_DEFAULT_LANGUAGE\n context = {\n \"payment_date\": datetime.fromtimestamp(invoice.created),\n \"next_payment_date\": subscription_info[\"next_payment_at\"],\n \"invoice_number\": invoice.number,\n \"cost\": invoice.total / 100,\n \"credit_card_brand\": subscription_info[\"brand\"],\n \"manage_subscription_url\": absolutify(reverse(\"payment_management\")),\n \"faq_url\": absolutify(reverse(\"payments_index\")),\n \"contact_email\": settings.CONTRIBUTION_SUPPORT_EMAIL,\n }\n with translation.override(locale):\n subject = render_email(\"users/email/payment_received/subject.ltxt\", context)\n # Email subject *must not* contain newlines\n subject = 
\"\".join(subject.splitlines())\n plain = render_email(\"users/email/payment_received/plain.ltxt\", context)\n\n send_mail_retrying(\n subject,\n plain,\n settings.DEFAULT_FROM_EMAIL,\n [user.email],\n attachment={\n \"name\": os.path.basename(urlparse(invoice.invoice_pdf).path),\n \"bytes\": _download_from_url(invoice.invoice_pdf),\n \"mime\": \"application/pdf\",\n },\n )\n\n\ndef _download_from_url(url):\n pdf_download = requests_retry_session().get(url)\n pdf_download.raise_for_status()\n return pdf_download.content\n\n\nclass APIUserDetailsView(APIView):\n http_method_names = [\"get\", \"put\"]\n serializer_class = UserDetailsSerializer\n renderer_classes = [JSONRenderer]\n permission_classes = [IsAuthenticated]\n\n def get(self, request, format=None):\n assert request.user.is_authenticated\n serializer = UserDetailsSerializer(request.user, many=False)\n return Response(serializer.data)\n\n def put(self, request, format=None):\n user = request.user\n serializer = UserDetailsSerializer(instance=user, data=request.data)\n if serializer.is_valid():\n was_subscribed = user.is_newsletter_subscribed\n old_username = user.username\n serializer.save(user=user)\n\n if not was_subscribed and user.is_newsletter_subscribed:\n newsletter_subscribed.send(None, user=user)\n if was_subscribed and not user.is_newsletter_subscribed:\n newsletter_unsubscribed.send(None, user=user)\n\n if old_username != user.username:\n username_changed.send(None, user=user)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nuser_details = never_cache(APIUserDetailsView.as_view())\n\n\n@csrf_exempt\n@require_POST\n@never_cache\ndef sendinblue_hooks(request):\n # Sendinblue does not sign its webhook requests, hence the event handlers following\n # are different from the Stripe ones, in that they treat the event as a notification\n # of a _potential_ change, while still needing to contact sendinblue to verify that\n # it actually happened.\n try:\n payload = json.loads(request.body)\n event = payload[\"event\"]\n email = payload[\"email\"]\n except (json.decoder.JSONDecodeError, KeyError) as exception:\n return HttpResponseBadRequest(\n f\"{exception.__class__.__name__} on {request.body}\"\n )\n\n if event == \"unsubscribe\":\n refresh_is_user_newsletter_subscribed(email)\n return HttpResponse()\n else:\n return HttpResponseBadRequest(\n f\"We did not expect a Sendinblue webhook of type {event['event']!r}\"\n )\n", "path": "kuma/api/v1/views.py"}], "after_files": [{"content": "import json\nimport os\nfrom datetime import datetime\nfrom urllib.parse import urlparse\n\nimport stripe\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import (\n HttpResponse,\n HttpResponseBadRequest,\n HttpResponseForbidden,\n JsonResponse,\n)\nfrom django.utils import translation\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET, require_POST\nfrom raven.contrib.django.models import client as raven_client\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom waffle import flag_is_active\nfrom waffle.decorators import waffle_flag\nfrom waffle.models import Flag, 
Switch\n\nfrom kuma.api.v1.forms import AccountSettingsForm\nfrom kuma.api.v1.serializers import UserDetailsSerializer\nfrom kuma.core.email_utils import render_email\nfrom kuma.core.ga_tracking import (\n ACTION_SUBSCRIPTION_CANCELED,\n ACTION_SUBSCRIPTION_CREATED,\n ACTION_SUBSCRIPTION_FEEDBACK,\n CATEGORY_MONTHLY_PAYMENTS,\n track_event,\n)\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import requests_retry_session, send_mail_retrying\nfrom kuma.users.models import User, UserSubscription\nfrom kuma.users.newsletter.utils import refresh_is_user_newsletter_subscribed\nfrom kuma.users.signals import (\n newsletter_subscribed,\n newsletter_unsubscribed,\n username_changed,\n)\nfrom kuma.users.stripe_utils import (\n cancel_stripe_customer_subscriptions,\n create_stripe_customer_and_subscription_for_user,\n retrieve_and_synchronize_subscription_info,\n)\nfrom kuma.users.templatetags.jinja_helpers import get_avatar_url\nfrom kuma.wiki.templatetags.jinja_helpers import absolutify\n\n\n@never_cache\n@require_GET\ndef whoami(request):\n \"\"\"\n Return a JSON object representing the current user, either\n authenticated or anonymous.\n \"\"\"\n user = request.user\n if user.is_authenticated:\n data = {\n \"username\": user.username,\n \"is_authenticated\": True,\n \"avatar_url\": get_avatar_url(user),\n \"email\": user.email,\n \"subscriber_number\": user.subscriber_number,\n }\n if UserSubscription.objects.filter(user=user, canceled__isnull=True).exists():\n data[\"is_subscriber\"] = True\n if user.is_staff:\n data[\"is_staff\"] = True\n if user.is_superuser:\n data[\"is_superuser\"] = True\n if user.is_beta_tester:\n data[\"is_beta_tester\"] = True\n else:\n data = {}\n\n geo = {}\n # https://aws.amazon.com/about-aws/whats-new/2020/07/cloudfront-geolocation-headers/\n cloudfront_country_header = \"HTTP_CLOUDFRONT_VIEWER_COUNTRY_NAME\"\n cloudfront_country_value = request.META.get(cloudfront_country_header)\n if cloudfront_country_value:\n geo[\"country\"] = cloudfront_country_value\n if geo:\n data[\"geo\"] = geo\n\n data[\"waffle\"] = {\n \"flags\": {},\n \"switches\": {s.name: True for s in Switch.get_all() if s.is_active()},\n }\n # Specifically and more smartly loop over the waffle Flag objects\n # to avoid unnecessary `cache.get(...)` calls within the `flag.is_active(request)`.\n for flag in Flag.get_all():\n if not request.user.is_authenticated:\n # Majority of users are anonymous, so let's focus on that.\n # Let's see if there's a quick reason to bail the\n # expensive `flag.is_active(request)` call.\n if (\n flag.authenticated or flag.staff or flag.superusers\n ) and not flag.everyone:\n continue\n if not (flag.languages or flag.percent or flag.everyone):\n continue\n if flag.languages:\n languages = [ln.strip() for ln in flag.languages.split(\",\")]\n if (\n not hasattr(request, \"LANGUAGE_CODE\")\n or request.LANGUAGE_CODE not in languages\n ):\n continue\n\n if flag.is_active(request):\n data[\"waffle\"][\"flags\"][flag.name] = True\n\n return JsonResponse(data)\n\n\n@never_cache\ndef account_settings(request):\n user = request.user\n if not user.is_authenticated:\n return HttpResponseForbidden(\"not signed in\")\n if request.method == \"DELETE\":\n # This should cease to be necessary once we get rid of the Wiki models.\n anon, _ = User.objects.get_or_create(username=\"Anonymous\")\n user.revisionakismetsubmission_set.update(sender=anon)\n user.documentdeletionlog_set.update(user=anon)\n user.documentspamattempt_set.update(user=anon)\n 
user.documentspam_reviewed.update(reviewer=anon)\n user.created_revisions.update(creator=anon)\n user.created_attachment_revisions.update(creator=anon)\n user.bans.update(user=anon)\n user.bans_issued.update(by=anon)\n\n user.delete()\n return JsonResponse({\"deleted\": True})\n elif request.method == \"POST\":\n form = AccountSettingsForm(request.POST)\n if not form.is_valid():\n return JsonResponse({\"errors\": form.errors.get_json_data()}, status=400)\n\n set_locale = None\n if form.cleaned_data.get(\"locale\"):\n user.locale = set_locale = form.cleaned_data[\"locale\"]\n user.save()\n\n response = JsonResponse({\"ok\": True})\n if set_locale:\n response.set_cookie(\n key=settings.LANGUAGE_COOKIE_NAME,\n value=set_locale,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN,\n secure=settings.LANGUAGE_COOKIE_SECURE,\n )\n\n return response\n\n context = {\n \"csrfmiddlewaretoken\": request.META.get(\"CSRF_COOKIE\"),\n \"locale\": user.locale,\n }\n return JsonResponse(context)\n\n\n@waffle_flag(\"subscription\")\n@never_cache\n@require_POST\ndef send_subscriptions_feedback(request):\n \"\"\"\n Sends feedback to Google Analytics. This is done on the\n backend to ensure that all feedback is collected, even\n from users with DNT or where GA is disabled.\n \"\"\"\n data = json.loads(request.body)\n feedback = (data.get(\"feedback\") or \"\").strip()\n\n if not feedback:\n return HttpResponseBadRequest(\"no feedback\")\n\n track_event(\n CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_FEEDBACK, data[\"feedback\"]\n )\n return HttpResponse(status=204)\n\n\n@api_view([\"POST\", \"GET\", \"DELETE\"])\n@never_cache\ndef subscriptions(request):\n if not request.user.is_authenticated or not flag_is_active(request, \"subscription\"):\n return Response(None, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == \"POST\":\n create_stripe_customer_and_subscription_for_user(\n request.user, request.user.email, request.data[\"stripe_token\"]\n )\n return Response(None, status=status.HTTP_201_CREATED)\n elif request.method == \"DELETE\":\n cancelled = cancel_stripe_customer_subscriptions(request.user)\n if cancelled:\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(\"nothing to cancel\", status=status.HTTP_410_GONE)\n\n all_subscriptions = []\n subscription_info = retrieve_and_synchronize_subscription_info(request.user)\n if subscription_info:\n all_subscriptions.append(subscription_info)\n\n return Response({\"subscriptions\": all_subscriptions})\n\n\n@csrf_exempt\n@require_POST\n@never_cache\ndef stripe_hooks(request):\n try:\n payload = json.loads(request.body)\n except ValueError:\n return HttpResponseBadRequest(\"Invalid JSON payload\")\n\n try:\n event = stripe.Event.construct_from(payload, stripe.api_key)\n except stripe.error.StripeError:\n raven_client.captureException()\n return HttpResponseBadRequest()\n\n # Generally, for this list of if-statements, see the create_missing_stripe_webhook\n # function.\n # The list of events there ought to at least minimally match what we're prepared\n # to deal with here.\n\n if event.type == \"invoice.payment_succeeded\":\n invoice = event.data.object\n _send_payment_received_email(invoice, request.LANGUAGE_CODE)\n track_event(\n CATEGORY_MONTHLY_PAYMENTS,\n ACTION_SUBSCRIPTION_CREATED,\n f\"{settings.CONTRIBUTION_AMOUNT_USD:.2f}\",\n )\n\n elif event.type == \"customer.subscription.deleted\":\n obj = event.data.object\n for user in 
User.objects.filter(stripe_customer_id=obj.customer):\n UserSubscription.set_canceled(user, obj.id)\n track_event(CATEGORY_MONTHLY_PAYMENTS, ACTION_SUBSCRIPTION_CANCELED, \"webhook\")\n\n else:\n return HttpResponseBadRequest(\n f\"We did not expect a Stripe webhook of type {event.type!r}\"\n )\n\n return HttpResponse()\n\n\ndef _send_payment_received_email(invoice, locale):\n user = get_user_model().objects.get(stripe_customer_id=invoice.customer)\n subscription_info = retrieve_and_synchronize_subscription_info(user)\n locale = locale or settings.WIKI_DEFAULT_LANGUAGE\n context = {\n \"payment_date\": datetime.fromtimestamp(invoice.created),\n \"next_payment_date\": subscription_info[\"next_payment_at\"],\n \"invoice_number\": invoice.number,\n \"cost\": invoice.total / 100,\n \"credit_card_brand\": subscription_info[\"brand\"],\n \"manage_subscription_url\": absolutify(reverse(\"payment_management\")),\n \"faq_url\": absolutify(reverse(\"payments_index\")),\n \"contact_email\": settings.CONTRIBUTION_SUPPORT_EMAIL,\n }\n with translation.override(locale):\n subject = render_email(\"users/email/payment_received/subject.ltxt\", context)\n # Email subject *must not* contain newlines\n subject = \"\".join(subject.splitlines())\n plain = render_email(\"users/email/payment_received/plain.ltxt\", context)\n\n send_mail_retrying(\n subject,\n plain,\n settings.DEFAULT_FROM_EMAIL,\n [user.email],\n attachment={\n \"name\": os.path.basename(urlparse(invoice.invoice_pdf).path),\n \"bytes\": _download_from_url(invoice.invoice_pdf),\n \"mime\": \"application/pdf\",\n },\n )\n\n\ndef _download_from_url(url):\n pdf_download = requests_retry_session().get(url)\n pdf_download.raise_for_status()\n return pdf_download.content\n\n\nclass APIUserDetailsView(APIView):\n http_method_names = [\"get\", \"put\"]\n serializer_class = UserDetailsSerializer\n renderer_classes = [JSONRenderer]\n permission_classes = [IsAuthenticated]\n\n def get(self, request, format=None):\n assert request.user.is_authenticated\n serializer = UserDetailsSerializer(request.user, many=False)\n return Response(serializer.data)\n\n def put(self, request, format=None):\n user = request.user\n serializer = UserDetailsSerializer(instance=user, data=request.data)\n if serializer.is_valid():\n was_subscribed = user.is_newsletter_subscribed\n old_username = user.username\n serializer.save(user=user)\n\n if not was_subscribed and user.is_newsletter_subscribed:\n newsletter_subscribed.send(None, user=user)\n if was_subscribed and not user.is_newsletter_subscribed:\n newsletter_unsubscribed.send(None, user=user)\n\n if old_username != user.username:\n username_changed.send(None, user=user)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nuser_details = never_cache(APIUserDetailsView.as_view())\n\n\n@csrf_exempt\n@require_POST\n@never_cache\ndef sendinblue_hooks(request):\n # Sendinblue does not sign its webhook requests, hence the event handlers following\n # are different from the Stripe ones, in that they treat the event as a notification\n # of a _potential_ change, while still needing to contact sendinblue to verify that\n # it actually happened.\n try:\n payload = json.loads(request.body)\n event = payload[\"event\"]\n email = payload[\"email\"]\n except (json.decoder.JSONDecodeError, KeyError) as exception:\n return HttpResponseBadRequest(\n f\"{exception.__class__.__name__} on {request.body}\"\n )\n\n if event == \"unsubscribe\":\n 
refresh_is_user_newsletter_subscribed(email)\n return HttpResponse()\n else:\n return HttpResponseBadRequest(\n f\"We did not expect a Sendinblue webhook of type {event['event']!r}\"\n )\n", "path": "kuma/api/v1/views.py"}]} | 3,836 | 202 |
gh_patches_debug_34310 | rasdani/github-patches | git_diff | gratipay__gratipay.com-1931 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
charts.json returns null for `withdrawals`
Expected:
``` json
[
{
"active_users": 13,
"charges": 27.02,
"date": "2014-01-15",
"total_gifts": 251383.81,
"total_users": 100,
"weekly_gifts": 382.95,
"withdrawals": 3.53
},
...
```
Received:
``` json
[
{
"active_users": 13,
"charges": 27.02,
"date": "2014-01-15",
"total_gifts": 251383.81,
"total_users": 100,
"weekly_gifts": 382.95,
"withdrawals": null
},
...
```
2012-08-17 and every week before it are broken:
``` json
{
"active_users": 313,
"charges": 964.40,
"date": "2012-08-17",
"total_gifts": 5812.20,
"total_users": 4067,
"weekly_gifts": 1052.09,
"withdrawals": null
},
```
--- END ISSUE ---
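A note on the likely mechanism before the files: in Postgres, an aggregate such as `sum()` over zero matching rows returns NULL rather than 0, so a payday week with no ACH exchanges naturally serializes as `"withdrawals": null`. The patch for this issue (shown further down) wraps each aggregate in `coalesce(..., 0)`. A minimal sketch of the difference, using SQLite purely as a stand-in and a simplified `exchanges` table (the schema here is an assumption for illustration):

```python
import sqlite3

# Simplified stand-in for the exchanges table; no rows fall in the payday window,
# which matches the empty weeks at and before 2012-08-17.
con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE exchanges (amount REAL, fee REAL, timestamp TEXT)")

# Plain SUM over an empty set yields NULL (None in Python), which becomes null in JSON.
print(con.execute("SELECT sum(amount) FROM exchanges WHERE amount < 0").fetchone()[0])  # None

# COALESCE pins the empty case to 0 instead.
print(con.execute("SELECT coalesce(sum(amount), 0) FROM exchanges WHERE amount < 0").fetchone()[0])  # 0
```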
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/models/__init__.py`
Content:
```
1 """
2
3 The most important object in the Gittip object model is Participant, and the
4 second most important one is Ccommunity. There are a few others, but those are
5 the most important two. Participant, in particular, is at the center of
6 everything on Gittip.
7
8 """
9 from postgres import Postgres
10
11 class GittipDB(Postgres):
12
13 def self_check(self):
14 """
15 Runs all available self checks on the database.
16 """
17 self._check_balances()
18 self._check_tips()
19 self._check_orphans()
20 self._check_orphans_no_tips()
21 self._check_paydays_volumes()
22
23 def _check_tips(self):
24 """
25 Checks that there are no rows in tips with duplicate (tipper, tippee, mtime).
26
27 https://github.com/gittip/www.gittip.com/issues/1704
28 """
29 conflicting_tips = self.one("""
30 SELECT count(*)
31 FROM
32 (
33 SELECT * FROM tips
34 EXCEPT
35 SELECT DISTINCT ON(tipper, tippee, mtime) *
36 FROM tips
37 ORDER BY tipper, tippee, mtime
38 ) AS foo
39 """)
40 assert conflicting_tips == 0
41
42 def _check_balances(self):
43 """
44 Recalculates balances for all participants from transfers and exchanges.
45
46 https://github.com/gittip/www.gittip.com/issues/1118
47 """
48 with self.get_cursor() as cursor:
49 if cursor.one("select exists (select * from paydays where ts_end < ts_start) as running"):
50 # payday is running and the query bellow does not account for pending
51 return
52 b = cursor.one("""
53 select count(*)
54 from (
55 select username, sum(a) as balance
56 from (
57 select participant as username, sum(amount) as a
58 from exchanges
59 where amount > 0
60 group by participant
61
62 union
63
64 select participant as username, sum(amount-fee) as a
65 from exchanges
66 where amount < 0
67 group by participant
68
69 union
70
71 select tipper as username, sum(-amount) as a
72 from transfers
73 group by tipper
74
75 union
76
77 select tippee as username, sum(amount) as a
78 from transfers
79 group by tippee
80 ) as foo
81 group by username
82
83 except
84
85 select username, balance
86 from participants
87 ) as foo2
88 """)
89 assert b == 0, "conflicting balances: {}".format(b)
90
91 def _check_orphans(self):
92 """
93 Finds participants that
94 * does not have corresponding elsewhere account
95 * have not been absorbed by other participant
96
97 These are broken because new participants arise from elsewhere
98 and elsewhere is detached only by take over which makes a note
99 in absorptions if it removes the last elsewhere account.
100
101 Especially bad case is when also claimed_time is set because
102 there must have been elsewhere account attached and used to sign in.
103
104 https://github.com/gittip/www.gittip.com/issues/617
105 """
106 orphans = self.all("""
107 select username
108 from participants
109 where not exists (select * from elsewhere where elsewhere.participant=username)
110 and not exists (select * from absorptions where archived_as=username)
111 """)
112 assert len(orphans) == 0, "missing elsewheres: {}".format(list(orphans))
113
114 def _check_orphans_no_tips(self):
115 """
116 Finds participants
117 * without elsewhere account attached
118 * having non zero outstanding tip
119
120 This should not happen because when we remove the last elsewhere account
121 in take_over we also zero out all tips.
122 """
123 tips_with_orphans = self.all("""
124 WITH orphans AS (
125 SELECT username FROM participants
126 WHERE NOT EXISTS (SELECT 1 FROM elsewhere WHERE participant=username)
127 ), valid_tips AS (
128 SELECT * FROM (
129 SELECT DISTINCT ON (tipper, tippee) *
130 FROM tips
131 ORDER BY tipper, tippee, mtime DESC
132 ) AS foo
133 WHERE amount > 0
134 )
135 SELECT id FROM valid_tips
136 WHERE tipper IN (SELECT * FROM orphans)
137 OR tippee IN (SELECT * FROM orphans)
138 """)
139 known = set([25206]) # '4c074000c7bc', 'naderman', '3.00'
140 real = set(tips_with_orphans) - known
141 assert len(real) == 0, real
142
143 def _check_paydays_volumes(self):
144 """
145 Recalculate *_volume fields in paydays table using exchanges table.
146 """
147 charge_volume = self.all("""
148 select * from (
149 select id, ts_start, charge_volume, (
150 select sum(amount+fee)
151 from exchanges
152 where timestamp > ts_start
153 and timestamp < ts_end
154 and amount > 0
155 ) as ref
156 from paydays
157 order by id
158 ) as foo
159 where charge_volume != ref
160 """)
161 assert len(charge_volume) == 0
162
163 charge_fees_volume = self.all("""
164 select * from (
165 select id, ts_start, charge_fees_volume, (
166 select sum(fee)
167 from exchanges
168 where timestamp > ts_start
169 and timestamp < ts_end
170 and amount > 0
171 ) as ref
172 from paydays
173 order by id
174 ) as foo
175 where charge_fees_volume != ref
176 """)
177 assert len(charge_fees_volume) == 0
178
179 ach_volume = self.all("""
180 select * from (
181 select id, ts_start, ach_volume, (
182 select sum(amount)
183 from exchanges
184 where timestamp > ts_start
185 and timestamp < ts_end
186 and amount < 0
187 ) as ref
188 from paydays
189 order by id
190 ) as foo
191 where ach_volume != ref
192 """)
193 assert len(ach_volume) == 0
194
195 ach_fees_volume = self.all("""
196 select * from (
197 select id, ts_start, ach_fees_volume, (
198 select sum(fee)
199 from exchanges
200 where timestamp > ts_start
201 and timestamp < ts_end
202 and amount < 0
203 ) as ref
204 from paydays
205 order by id
206 ) as foo
207 where ach_fees_volume != ref
208 """)
209 assert len(ach_fees_volume) == 0
210 #
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gittip/models/__init__.py b/gittip/models/__init__.py
--- a/gittip/models/__init__.py
+++ b/gittip/models/__init__.py
@@ -147,7 +147,7 @@
charge_volume = self.all("""
select * from (
select id, ts_start, charge_volume, (
- select sum(amount+fee)
+ select coalesce(sum(amount+fee), 0)
from exchanges
where timestamp > ts_start
and timestamp < ts_end
@@ -163,7 +163,7 @@
charge_fees_volume = self.all("""
select * from (
select id, ts_start, charge_fees_volume, (
- select sum(fee)
+ select coalesce(sum(fee), 0)
from exchanges
where timestamp > ts_start
and timestamp < ts_end
@@ -179,7 +179,7 @@
ach_volume = self.all("""
select * from (
select id, ts_start, ach_volume, (
- select sum(amount)
+ select coalesce(sum(amount), 0)
from exchanges
where timestamp > ts_start
and timestamp < ts_end
@@ -195,7 +195,7 @@
ach_fees_volume = self.all("""
select * from (
select id, ts_start, ach_fees_volume, (
- select sum(fee)
+ select coalesce(sum(fee), 0)
from exchanges
where timestamp > ts_start
and timestamp < ts_end
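For completeness, a hedged sketch of exercising the patched checks end to end; the connection string is a placeholder, not something taken from the repository:

```python
# Illustration only: run the self-checks against a test database.
from gittip.models import GittipDB

db = GittipDB("postgres://localhost/gittip-test")  # placeholder DSN
db.self_check()  # each _check_* helper asserts, so a clean run simply returns None
```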
| {"golden_diff": "diff --git a/gittip/models/__init__.py b/gittip/models/__init__.py\n--- a/gittip/models/__init__.py\n+++ b/gittip/models/__init__.py\n@@ -147,7 +147,7 @@\n charge_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, charge_volume, (\n- select sum(amount+fee)\n+ select coalesce(sum(amount+fee), 0)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n@@ -163,7 +163,7 @@\n charge_fees_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, charge_fees_volume, (\n- select sum(fee)\n+ select coalesce(sum(fee), 0)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n@@ -179,7 +179,7 @@\n ach_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, ach_volume, (\n- select sum(amount)\n+ select coalesce(sum(amount), 0)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n@@ -195,7 +195,7 @@\n ach_fees_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, ach_fees_volume, (\n- select sum(fee)\n+ select coalesce(sum(fee), 0)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n", "issue": "charts.json returns null for `widthdrawals`\nExpected:\n\n``` json\n[\n {\n \"active_users\": 13,\n \"charges\": 27.02,\n \"date\": \"2014-01-15\",\n \"total_gifts\": 251383.81,\n \"total_users\": 100,\n \"weekly_gifts\": 382.95,\n \"withdrawals\": 3.53\n },\n...\n```\n\nReceived:\n\n``` json\n[\n {\n \"active_users\": 13,\n \"charges\": 27.02,\n \"date\": \"2014-01-15\",\n \"total_gifts\": 251383.81,\n \"total_users\": 100,\n \"weekly_gifts\": 382.95,\n \"withdrawals\": null\n },\n...\n```\n\n2012-08-17 and every week before it is broken:\n\n``` json\n {\n \"active_users\": 313,\n \"charges\": 964.40,\n \"date\": \"2012-08-17\",\n \"total_gifts\": 5812.20,\n \"total_users\": 4067,\n \"weekly_gifts\": 1052.09,\n \"withdrawals\": null\n },\n\n```\n\n", "before_files": [{"content": "\"\"\"\n\nThe most important object in the Gittip object model is Participant, and the\nsecond most important one is Ccommunity. There are a few others, but those are\nthe most important two. 
Participant, in particular, is at the center of\neverything on Gittip.\n\n\"\"\"\nfrom postgres import Postgres\n\nclass GittipDB(Postgres):\n\n def self_check(self):\n \"\"\"\n Runs all available self checks on the database.\n \"\"\"\n self._check_balances()\n self._check_tips()\n self._check_orphans()\n self._check_orphans_no_tips()\n self._check_paydays_volumes()\n\n def _check_tips(self):\n \"\"\"\n Checks that there are no rows in tips with duplicate (tipper, tippee, mtime).\n\n https://github.com/gittip/www.gittip.com/issues/1704\n \"\"\"\n conflicting_tips = self.one(\"\"\"\n SELECT count(*)\n FROM\n (\n SELECT * FROM tips\n EXCEPT\n SELECT DISTINCT ON(tipper, tippee, mtime) *\n FROM tips\n ORDER BY tipper, tippee, mtime\n ) AS foo\n \"\"\")\n assert conflicting_tips == 0\n\n def _check_balances(self):\n \"\"\"\n Recalculates balances for all participants from transfers and exchanges.\n\n https://github.com/gittip/www.gittip.com/issues/1118\n \"\"\"\n with self.get_cursor() as cursor:\n if cursor.one(\"select exists (select * from paydays where ts_end < ts_start) as running\"):\n # payday is running and the query bellow does not account for pending\n return\n b = cursor.one(\"\"\"\n select count(*)\n from (\n select username, sum(a) as balance\n from (\n select participant as username, sum(amount) as a\n from exchanges\n where amount > 0\n group by participant\n\n union\n\n select participant as username, sum(amount-fee) as a\n from exchanges\n where amount < 0\n group by participant\n\n union\n\n select tipper as username, sum(-amount) as a\n from transfers\n group by tipper\n\n union\n\n select tippee as username, sum(amount) as a\n from transfers\n group by tippee\n ) as foo\n group by username\n\n except\n\n select username, balance\n from participants\n ) as foo2\n \"\"\")\n assert b == 0, \"conflicting balances: {}\".format(b)\n\n def _check_orphans(self):\n \"\"\"\n Finds participants that\n * does not have corresponding elsewhere account\n * have not been absorbed by other participant\n\n These are broken because new participants arise from elsewhere\n and elsewhere is detached only by take over which makes a note\n in absorptions if it removes the last elsewhere account.\n\n Especially bad case is when also claimed_time is set because\n there must have been elsewhere account attached and used to sign in.\n\n https://github.com/gittip/www.gittip.com/issues/617\n \"\"\"\n orphans = self.all(\"\"\"\n select username\n from participants\n where not exists (select * from elsewhere where elsewhere.participant=username)\n and not exists (select * from absorptions where archived_as=username)\n \"\"\")\n assert len(orphans) == 0, \"missing elsewheres: {}\".format(list(orphans))\n\n def _check_orphans_no_tips(self):\n \"\"\"\n Finds participants\n * without elsewhere account attached\n * having non zero outstanding tip\n\n This should not happen because when we remove the last elsewhere account\n in take_over we also zero out all tips.\n \"\"\"\n tips_with_orphans = self.all(\"\"\"\n WITH orphans AS (\n SELECT username FROM participants\n WHERE NOT EXISTS (SELECT 1 FROM elsewhere WHERE participant=username)\n ), valid_tips AS (\n SELECT * FROM (\n SELECT DISTINCT ON (tipper, tippee) *\n FROM tips\n ORDER BY tipper, tippee, mtime DESC\n ) AS foo\n WHERE amount > 0\n )\n SELECT id FROM valid_tips\n WHERE tipper IN (SELECT * FROM orphans)\n OR tippee IN (SELECT * FROM orphans)\n \"\"\")\n known = set([25206]) # '4c074000c7bc', 'naderman', '3.00'\n real = set(tips_with_orphans) - known\n 
assert len(real) == 0, real\n\n def _check_paydays_volumes(self):\n \"\"\"\n Recalculate *_volume fields in paydays table using exchanges table.\n \"\"\"\n charge_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, charge_volume, (\n select sum(amount+fee)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n and amount > 0\n ) as ref\n from paydays\n order by id\n ) as foo\n where charge_volume != ref\n \"\"\")\n assert len(charge_volume) == 0\n\n charge_fees_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, charge_fees_volume, (\n select sum(fee)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n and amount > 0\n ) as ref\n from paydays\n order by id\n ) as foo\n where charge_fees_volume != ref\n \"\"\")\n assert len(charge_fees_volume) == 0\n\n ach_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, ach_volume, (\n select sum(amount)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n and amount < 0\n ) as ref\n from paydays\n order by id\n ) as foo\n where ach_volume != ref\n \"\"\")\n assert len(ach_volume) == 0\n\n ach_fees_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, ach_fees_volume, (\n select sum(fee)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n and amount < 0\n ) as ref\n from paydays\n order by id\n ) as foo\n where ach_fees_volume != ref\n \"\"\")\n assert len(ach_fees_volume) == 0\n#\n", "path": "gittip/models/__init__.py"}], "after_files": [{"content": "\"\"\"\n\nThe most important object in the Gittip object model is Participant, and the\nsecond most important one is Ccommunity. There are a few others, but those are\nthe most important two. Participant, in particular, is at the center of\neverything on Gittip.\n\n\"\"\"\nfrom postgres import Postgres\n\nclass GittipDB(Postgres):\n\n def self_check(self):\n \"\"\"\n Runs all available self checks on the database.\n \"\"\"\n self._check_balances()\n self._check_tips()\n self._check_orphans()\n self._check_orphans_no_tips()\n self._check_paydays_volumes()\n\n def _check_tips(self):\n \"\"\"\n Checks that there are no rows in tips with duplicate (tipper, tippee, mtime).\n\n https://github.com/gittip/www.gittip.com/issues/1704\n \"\"\"\n conflicting_tips = self.one(\"\"\"\n SELECT count(*)\n FROM\n (\n SELECT * FROM tips\n EXCEPT\n SELECT DISTINCT ON(tipper, tippee, mtime) *\n FROM tips\n ORDER BY tipper, tippee, mtime\n ) AS foo\n \"\"\")\n assert conflicting_tips == 0\n\n def _check_balances(self):\n \"\"\"\n Recalculates balances for all participants from transfers and exchanges.\n\n https://github.com/gittip/www.gittip.com/issues/1118\n \"\"\"\n with self.get_cursor() as cursor:\n if cursor.one(\"select exists (select * from paydays where ts_end < ts_start) as running\"):\n # payday is running and the query bellow does not account for pending\n return\n b = cursor.one(\"\"\"\n select count(*)\n from (\n select username, sum(a) as balance\n from (\n select participant as username, sum(amount) as a\n from exchanges\n where amount > 0\n group by participant\n\n union\n\n select participant as username, sum(amount-fee) as a\n from exchanges\n where amount < 0\n group by participant\n\n union\n\n select tipper as username, sum(-amount) as a\n from transfers\n group by tipper\n\n union\n\n select tippee as username, sum(amount) as a\n from transfers\n group by tippee\n ) as foo\n group by username\n\n except\n\n select username, balance\n from participants\n ) as foo2\n \"\"\")\n 
assert b == 0, \"conflicting balances: {}\".format(b)\n\n def _check_orphans(self):\n \"\"\"\n Finds participants that\n * does not have corresponding elsewhere account\n * have not been absorbed by other participant\n\n These are broken because new participants arise from elsewhere\n and elsewhere is detached only by take over which makes a note\n in absorptions if it removes the last elsewhere account.\n\n Especially bad case is when also claimed_time is set because\n there must have been elsewhere account attached and used to sign in.\n\n https://github.com/gittip/www.gittip.com/issues/617\n \"\"\"\n orphans = self.all(\"\"\"\n select username\n from participants\n where not exists (select * from elsewhere where elsewhere.participant=username)\n and not exists (select * from absorptions where archived_as=username)\n \"\"\")\n assert len(orphans) == 0, \"missing elsewheres: {}\".format(list(orphans))\n\n def _check_orphans_no_tips(self):\n \"\"\"\n Finds participants\n * without elsewhere account attached\n * having non zero outstanding tip\n\n This should not happen because when we remove the last elsewhere account\n in take_over we also zero out all tips.\n \"\"\"\n tips_with_orphans = self.all(\"\"\"\n WITH orphans AS (\n SELECT username FROM participants\n WHERE NOT EXISTS (SELECT 1 FROM elsewhere WHERE participant=username)\n ), valid_tips AS (\n SELECT * FROM (\n SELECT DISTINCT ON (tipper, tippee) *\n FROM tips\n ORDER BY tipper, tippee, mtime DESC\n ) AS foo\n WHERE amount > 0\n )\n SELECT id FROM valid_tips\n WHERE tipper IN (SELECT * FROM orphans)\n OR tippee IN (SELECT * FROM orphans)\n \"\"\")\n known = set([25206]) # '4c074000c7bc', 'naderman', '3.00'\n real = set(tips_with_orphans) - known\n assert len(real) == 0, real\n\n def _check_paydays_volumes(self):\n \"\"\"\n Recalculate *_volume fields in paydays table using exchanges table.\n \"\"\"\n charge_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, charge_volume, (\n select coalesce(sum(amount+fee), 0)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n and amount > 0\n ) as ref\n from paydays\n order by id\n ) as foo\n where charge_volume != ref\n \"\"\")\n assert len(charge_volume) == 0\n\n charge_fees_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, charge_fees_volume, (\n select coalesce(sum(fee), 0)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n and amount > 0\n ) as ref\n from paydays\n order by id\n ) as foo\n where charge_fees_volume != ref\n \"\"\")\n assert len(charge_fees_volume) == 0\n\n ach_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, ach_volume, (\n select coalesce(sum(amount), 0)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n and amount < 0\n ) as ref\n from paydays\n order by id\n ) as foo\n where ach_volume != ref\n \"\"\")\n assert len(ach_volume) == 0\n\n ach_fees_volume = self.all(\"\"\"\n select * from (\n select id, ts_start, ach_fees_volume, (\n select coalesce(sum(fee), 0)\n from exchanges\n where timestamp > ts_start\n and timestamp < ts_end\n and amount < 0\n ) as ref\n from paydays\n order by id\n ) as foo\n where ach_fees_volume != ref\n \"\"\")\n assert len(ach_fees_volume) == 0\n#\n", "path": "gittip/models/__init__.py"}]} | 2,542 | 348 |
gh_patches_debug_12414 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-4465 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Maybe_send_embed inconsistent behavior for cog developers
# Other bugs
#### What were you trying to do?
Use `maybe_send_embed` to send an empty string
#### What were you expecting to happen?
Consistent behavior with an error, failing to send an empty message per:
`discord.errors.HTTPException: 400 Bad Request (error code: 50006): Cannot send an empty message`
#### What actually happened?
Only caused an error when embeds were off, no error when embeds were on.
#### How can we reproduce this issue?
`[p]eval await ctx.maybe_send_embed("")`
`[p]embedset global`
`[p]eval await ctx.maybe_send_embed("")`
--- END ISSUE ---
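One way to make the two paths consistent is to validate the message before branching on embeds; the patch shown further down folds exactly this kind of check into `maybe_send_embed`. A self-contained sketch of the guard:

```python
def validate_simple_message(message: str) -> None:
    """Reject content that Discord would refuse for plain text, so the embed
    and non-embed branches of maybe_send_embed fail in the same way."""
    if not message or len(message) > 2000:
        raise ValueError("Message length must be between 1 and 2000")
```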
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/core/commands/context.py`
Content:
```
1 from __future__ import annotations
2
3 import asyncio
4 import contextlib
5 import os
6 import re
7 from typing import Iterable, List, Union, Optional, TYPE_CHECKING
8 import discord
9 from discord.ext.commands import Context as DPYContext
10
11 from .requires import PermState
12 from ..utils.chat_formatting import box
13 from ..utils.predicates import MessagePredicate
14 from ..utils import common_filters
15
16 if TYPE_CHECKING:
17 from .commands import Command
18 from ..bot import Red
19
20 TICK = "\N{WHITE HEAVY CHECK MARK}"
21
22 __all__ = ["Context", "GuildContext", "DMContext"]
23
24
25 class Context(DPYContext):
26 """Command invocation context for Red.
27
28 All context passed into commands will be of this type.
29
30 This class inherits from `discord.ext.commands.Context`.
31
32 Attributes
33 ----------
34 assume_yes: bool
35 Whether or not interactive checks should
36 be skipped and assumed to be confirmed.
37
38 This is intended for allowing automation of tasks.
39
40 An example of this would be scheduled commands
41 not requiring interaction if the cog developer
42 checks this value prior to confirming something interactively.
43
44 Depending on the potential impact of a command,
45 it may still be appropriate not to use this setting.
46 permission_state: PermState
47 The permission state the current context is in.
48 """
49
50 command: "Command"
51 invoked_subcommand: "Optional[Command]"
52 bot: "Red"
53
54 def __init__(self, **attrs):
55 self.assume_yes = attrs.pop("assume_yes", False)
56 super().__init__(**attrs)
57 self.permission_state: PermState = PermState.NORMAL
58
59 async def send(self, content=None, **kwargs):
60 """Sends a message to the destination with the content given.
61
62 This acts the same as `discord.ext.commands.Context.send`, with
63 one added keyword argument as detailed below in *Other Parameters*.
64
65 Parameters
66 ----------
67 content : str
68 The content of the message to send.
69
70 Other Parameters
71 ----------------
72 filter : callable (`str`) -> `str`, optional
73 A function which is used to filter the ``content`` before
74 it is sent.
75 This must take a single `str` as an argument, and return
76 the processed `str`. When `None` is passed, ``content`` won't be touched.
77 Defaults to `None`.
78 **kwargs
79 See `discord.ext.commands.Context.send`.
80
81 Returns
82 -------
83 discord.Message
84 The message that was sent.
85
86 """
87
88 _filter = kwargs.pop("filter", None)
89
90 if _filter and content:
91 content = _filter(str(content))
92
93 return await super().send(content=content, **kwargs)
94
95 async def send_help(self, command=None):
96 """ Send the command help message. """
97 # This allows people to manually use this similarly
98 # to the upstream d.py version, while retaining our use.
99 command = command or self.command
100 await self.bot.send_help_for(self, command)
101
102 async def tick(self) -> bool:
103 """Add a tick reaction to the command message.
104
105 Returns
106 -------
107 bool
108 :code:`True` if adding the reaction succeeded.
109
110 """
111 try:
112 await self.message.add_reaction(TICK)
113 except discord.HTTPException:
114 return False
115 else:
116 return True
117
118 async def react_quietly(
119 self, reaction: Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str]
120 ) -> bool:
121 """Adds a reaction to to the command message.
122
123 Returns
124 -------
125 bool
126 :code:`True` if adding the reaction succeeded.
127 """
128 try:
129 await self.message.add_reaction(reaction)
130 except discord.HTTPException:
131 return False
132 else:
133 return True
134
135 async def send_interactive(
136 self, messages: Iterable[str], box_lang: str = None, timeout: int = 15
137 ) -> List[discord.Message]:
138 """Send multiple messages interactively.
139
140 The user will be prompted for whether or not they would like to view
141 the next message, one at a time. They will also be notified of how
142 many messages are remaining on each prompt.
143
144 Parameters
145 ----------
146 messages : `iterable` of `str`
147 The messages to send.
148 box_lang : str
149 If specified, each message will be contained within a codeblock of
150 this language.
151 timeout : int
152 How long the user has to respond to the prompt before it times out.
153 After timing out, the bot deletes its prompt message.
154
155 """
156 messages = tuple(messages)
157 ret = []
158
159 for idx, page in enumerate(messages, 1):
160 if box_lang is None:
161 msg = await self.send(page)
162 else:
163 msg = await self.send(box(page, lang=box_lang))
164 ret.append(msg)
165 n_remaining = len(messages) - idx
166 if n_remaining > 0:
167 if n_remaining == 1:
168 plural = ""
169 is_are = "is"
170 else:
171 plural = "s"
172 is_are = "are"
173 query = await self.send(
174 "There {} still {} message{} remaining. "
175 "Type `more` to continue."
176 "".format(is_are, n_remaining, plural)
177 )
178 try:
179 resp = await self.bot.wait_for(
180 "message",
181 check=MessagePredicate.lower_equal_to("more", self),
182 timeout=timeout,
183 )
184 except asyncio.TimeoutError:
185 with contextlib.suppress(discord.HTTPException):
186 await query.delete()
187 break
188 else:
189 try:
190 await self.channel.delete_messages((query, resp))
191 except (discord.HTTPException, AttributeError):
192 # In case the bot can't delete other users' messages,
193 # or is not a bot account
194 # or channel is a DM
195 with contextlib.suppress(discord.HTTPException):
196 await query.delete()
197 return ret
198
199 async def embed_colour(self):
200 """
201 Helper function to get the colour for an embed.
202
203 Returns
204 -------
205 discord.Colour:
206 The colour to be used
207 """
208 return await self.bot.get_embed_color(self)
209
210 @property
211 def embed_color(self):
212 # Rather than double awaiting.
213 return self.embed_colour
214
215 async def embed_requested(self):
216 """
217 Simple helper to call bot.embed_requested
218 with logic around if embed permissions are available
219
220 Returns
221 -------
222 bool:
223 :code:`True` if an embed is requested
224 """
225 if self.guild and not self.channel.permissions_for(self.guild.me).embed_links:
226 return False
227 return await self.bot.embed_requested(self.channel, self.author, command=self.command)
228
229 async def maybe_send_embed(self, message: str) -> discord.Message:
230 """
231 Simple helper to send a simple message to context
232 without manually checking ctx.embed_requested
233 This should only be used for simple messages.
234
235 Parameters
236 ----------
237 message: `str`
238 The string to send
239
240 Returns
241 -------
242 discord.Message:
243 the message which was sent
244
245 Raises
246 ------
247 discord.Forbidden
248 see `discord.abc.Messageable.send`
249 discord.HTTPException
250 see `discord.abc.Messageable.send`
251 """
252
253 if await self.embed_requested():
254 return await self.send(
255 embed=discord.Embed(description=message, color=(await self.embed_colour()))
256 )
257 else:
258 return await self.send(
259 message,
260 allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False),
261 )
262
263 @property
264 def clean_prefix(self) -> str:
265 """
266 str: The command prefix, but with a sanitized version of the bot's mention if it was used as prefix.
267 This can be used in a context where discord user mentions might not render properly.
268 """
269 me = self.me
270 pattern = re.compile(rf"<@!?{me.id}>")
271 return pattern.sub(f"@{me.display_name}".replace("\\", r"\\"), self.prefix)
272
273 @property
274 def me(self) -> Union[discord.ClientUser, discord.Member]:
275 """
276 discord.abc.User: The bot member or user object.
277
278 If the context is DM, this will be a `discord.User` object.
279 """
280 if self.guild is not None:
281 return self.guild.me
282 else:
283 return self.bot.user
284
285
286 if TYPE_CHECKING or os.getenv("BUILDING_DOCS", False):
287
288 class DMContext(Context):
289 """
290 At runtime, this will still be a normal context object.
291
292 This lies about some type narrowing for type analysis in commands
293 using a dm_only decorator.
294
295 It is only correct to use when those types are already narrowed
296 """
297
298 @property
299 def author(self) -> discord.User:
300 ...
301
302 @property
303 def channel(self) -> discord.DMChannel:
304 ...
305
306 @property
307 def guild(self) -> None:
308 ...
309
310 @property
311 def me(self) -> discord.ClientUser:
312 ...
313
314 class GuildContext(Context):
315 """
316 At runtime, this will still be a normal context object.
317
318 This lies about some type narrowing for type analysis in commands
319 using a guild_only decorator.
320
321 It is only correct to use when those types are already narrowed
322 """
323
324 @property
325 def author(self) -> discord.Member:
326 ...
327
328 @property
329 def channel(self) -> discord.TextChannel:
330 ...
331
332 @property
333 def guild(self) -> discord.Guild:
334 ...
335
336 @property
337 def me(self) -> discord.Member:
338 ...
339
340
341 else:
342 GuildContext = Context
343 DMContext = Context
344
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redbot/core/commands/context.py b/redbot/core/commands/context.py
--- a/redbot/core/commands/context.py
+++ b/redbot/core/commands/context.py
@@ -248,8 +248,11 @@
see `discord.abc.Messageable.send`
discord.HTTPException
see `discord.abc.Messageable.send`
+ ValueError
+ when the message's length is not between 1 and 2000 characters.
"""
-
+ if not message or len(message) > 2000:
+ raise ValueError("Message length must be between 1 and 2000")
if await self.embed_requested():
return await self.send(
embed=discord.Embed(description=message, color=(await self.embed_colour()))
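With the guard in place, the reproduction from the issue fails identically whether embeds are on or off. A usage sketch, assuming a standard Red cog; the cog and command names are illustrative:

```python
from redbot.core import commands


class Demo(commands.Cog):
    @commands.command()
    async def emptytest(self, ctx: commands.Context):
        try:
            await ctx.maybe_send_embed("")  # now raises before any API call is made
        except ValueError as exc:
            await ctx.send(f"Rejected locally: {exc}")
```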
| {"golden_diff": "diff --git a/redbot/core/commands/context.py b/redbot/core/commands/context.py\n--- a/redbot/core/commands/context.py\n+++ b/redbot/core/commands/context.py\n@@ -248,8 +248,11 @@\n see `discord.abc.Messageable.send`\n discord.HTTPException\n see `discord.abc.Messageable.send`\n+ ValueError\n+ when the message's length is not between 1 and 2000 characters.\n \"\"\"\n-\n+ if not message or len(message) > 2000:\n+ raise ValueError(\"Message length must be between 1 and 2000\")\n if await self.embed_requested():\n return await self.send(\n embed=discord.Embed(description=message, color=(await self.embed_colour()))\n", "issue": "Maybe_send_embed inconsistent behavior for cog developers\n# Other bugs\r\n\r\n\r\n#### What were you trying to do?\r\n\r\nUse `maybe_send_embed` to send an empty string\r\n\r\n#### What were you expecting to happen?\r\n\r\nConsistent behavior with an error, failing to send an empty message per:\r\n`discord.errors.HTTPException: 400 Bad Request (error code: 50006): Cannot send an empty message`\r\n\r\n#### What actually happened?\r\n\r\nOnly caused an error when embeds were off, no error when embeds were on.\r\n\r\n#### How can we reproduce this issue?\r\n\r\n`[p]eval await ctx.maybe_send_embed(\"\")`\r\n`[p]embedset global`\r\n`[p]eval await ctx.maybe_send_embed(\"\")`\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport os\nimport re\nfrom typing import Iterable, List, Union, Optional, TYPE_CHECKING\nimport discord\nfrom discord.ext.commands import Context as DPYContext\n\nfrom .requires import PermState\nfrom ..utils.chat_formatting import box\nfrom ..utils.predicates import MessagePredicate\nfrom ..utils import common_filters\n\nif TYPE_CHECKING:\n from .commands import Command\n from ..bot import Red\n\nTICK = \"\\N{WHITE HEAVY CHECK MARK}\"\n\n__all__ = [\"Context\", \"GuildContext\", \"DMContext\"]\n\n\nclass Context(DPYContext):\n \"\"\"Command invocation context for Red.\n\n All context passed into commands will be of this type.\n\n This class inherits from `discord.ext.commands.Context`.\n\n Attributes\n ----------\n assume_yes: bool\n Whether or not interactive checks should\n be skipped and assumed to be confirmed.\n\n This is intended for allowing automation of tasks.\n\n An example of this would be scheduled commands\n not requiring interaction if the cog developer\n checks this value prior to confirming something interactively.\n\n Depending on the potential impact of a command,\n it may still be appropriate not to use this setting.\n permission_state: PermState\n The permission state the current context is in.\n \"\"\"\n\n command: \"Command\"\n invoked_subcommand: \"Optional[Command]\"\n bot: \"Red\"\n\n def __init__(self, **attrs):\n self.assume_yes = attrs.pop(\"assume_yes\", False)\n super().__init__(**attrs)\n self.permission_state: PermState = PermState.NORMAL\n\n async def send(self, content=None, **kwargs):\n \"\"\"Sends a message to the destination with the content given.\n\n This acts the same as `discord.ext.commands.Context.send`, with\n one added keyword argument as detailed below in *Other Parameters*.\n\n Parameters\n ----------\n content : str\n The content of the message to send.\n\n Other Parameters\n ----------------\n filter : callable (`str`) -> `str`, optional\n A function which is used to filter the ``content`` before\n it is sent.\n This must take a single `str` as an argument, and return\n the processed `str`. 
When `None` is passed, ``content`` won't be touched.\n Defaults to `None`.\n **kwargs\n See `discord.ext.commands.Context.send`.\n\n Returns\n -------\n discord.Message\n The message that was sent.\n\n \"\"\"\n\n _filter = kwargs.pop(\"filter\", None)\n\n if _filter and content:\n content = _filter(str(content))\n\n return await super().send(content=content, **kwargs)\n\n async def send_help(self, command=None):\n \"\"\" Send the command help message. \"\"\"\n # This allows people to manually use this similarly\n # to the upstream d.py version, while retaining our use.\n command = command or self.command\n await self.bot.send_help_for(self, command)\n\n async def tick(self) -> bool:\n \"\"\"Add a tick reaction to the command message.\n\n Returns\n -------\n bool\n :code:`True` if adding the reaction succeeded.\n\n \"\"\"\n try:\n await self.message.add_reaction(TICK)\n except discord.HTTPException:\n return False\n else:\n return True\n\n async def react_quietly(\n self, reaction: Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str]\n ) -> bool:\n \"\"\"Adds a reaction to to the command message.\n\n Returns\n -------\n bool\n :code:`True` if adding the reaction succeeded.\n \"\"\"\n try:\n await self.message.add_reaction(reaction)\n except discord.HTTPException:\n return False\n else:\n return True\n\n async def send_interactive(\n self, messages: Iterable[str], box_lang: str = None, timeout: int = 15\n ) -> List[discord.Message]:\n \"\"\"Send multiple messages interactively.\n\n The user will be prompted for whether or not they would like to view\n the next message, one at a time. They will also be notified of how\n many messages are remaining on each prompt.\n\n Parameters\n ----------\n messages : `iterable` of `str`\n The messages to send.\n box_lang : str\n If specified, each message will be contained within a codeblock of\n this language.\n timeout : int\n How long the user has to respond to the prompt before it times out.\n After timing out, the bot deletes its prompt message.\n\n \"\"\"\n messages = tuple(messages)\n ret = []\n\n for idx, page in enumerate(messages, 1):\n if box_lang is None:\n msg = await self.send(page)\n else:\n msg = await self.send(box(page, lang=box_lang))\n ret.append(msg)\n n_remaining = len(messages) - idx\n if n_remaining > 0:\n if n_remaining == 1:\n plural = \"\"\n is_are = \"is\"\n else:\n plural = \"s\"\n is_are = \"are\"\n query = await self.send(\n \"There {} still {} message{} remaining. 
\"\n \"Type `more` to continue.\"\n \"\".format(is_are, n_remaining, plural)\n )\n try:\n resp = await self.bot.wait_for(\n \"message\",\n check=MessagePredicate.lower_equal_to(\"more\", self),\n timeout=timeout,\n )\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n break\n else:\n try:\n await self.channel.delete_messages((query, resp))\n except (discord.HTTPException, AttributeError):\n # In case the bot can't delete other users' messages,\n # or is not a bot account\n # or channel is a DM\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n return ret\n\n async def embed_colour(self):\n \"\"\"\n Helper function to get the colour for an embed.\n\n Returns\n -------\n discord.Colour:\n The colour to be used\n \"\"\"\n return await self.bot.get_embed_color(self)\n\n @property\n def embed_color(self):\n # Rather than double awaiting.\n return self.embed_colour\n\n async def embed_requested(self):\n \"\"\"\n Simple helper to call bot.embed_requested\n with logic around if embed permissions are available\n\n Returns\n -------\n bool:\n :code:`True` if an embed is requested\n \"\"\"\n if self.guild and not self.channel.permissions_for(self.guild.me).embed_links:\n return False\n return await self.bot.embed_requested(self.channel, self.author, command=self.command)\n\n async def maybe_send_embed(self, message: str) -> discord.Message:\n \"\"\"\n Simple helper to send a simple message to context\n without manually checking ctx.embed_requested\n This should only be used for simple messages.\n\n Parameters\n ----------\n message: `str`\n The string to send\n\n Returns\n -------\n discord.Message:\n the message which was sent\n\n Raises\n ------\n discord.Forbidden\n see `discord.abc.Messageable.send`\n discord.HTTPException\n see `discord.abc.Messageable.send`\n \"\"\"\n\n if await self.embed_requested():\n return await self.send(\n embed=discord.Embed(description=message, color=(await self.embed_colour()))\n )\n else:\n return await self.send(\n message,\n allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False),\n )\n\n @property\n def clean_prefix(self) -> str:\n \"\"\"\n str: The command prefix, but with a sanitized version of the bot's mention if it was used as prefix.\n This can be used in a context where discord user mentions might not render properly.\n \"\"\"\n me = self.me\n pattern = re.compile(rf\"<@!?{me.id}>\")\n return pattern.sub(f\"@{me.display_name}\".replace(\"\\\\\", r\"\\\\\"), self.prefix)\n\n @property\n def me(self) -> Union[discord.ClientUser, discord.Member]:\n \"\"\"\n discord.abc.User: The bot member or user object.\n\n If the context is DM, this will be a `discord.User` object.\n \"\"\"\n if self.guild is not None:\n return self.guild.me\n else:\n return self.bot.user\n\n\nif TYPE_CHECKING or os.getenv(\"BUILDING_DOCS\", False):\n\n class DMContext(Context):\n \"\"\"\n At runtime, this will still be a normal context object.\n\n This lies about some type narrowing for type analysis in commands\n using a dm_only decorator.\n\n It is only correct to use when those types are already narrowed\n \"\"\"\n\n @property\n def author(self) -> discord.User:\n ...\n\n @property\n def channel(self) -> discord.DMChannel:\n ...\n\n @property\n def guild(self) -> None:\n ...\n\n @property\n def me(self) -> discord.ClientUser:\n ...\n\n class GuildContext(Context):\n \"\"\"\n At runtime, this will still be a normal context object.\n\n This lies about some type narrowing for type 
analysis in commands\n using a guild_only decorator.\n\n It is only correct to use when those types are already narrowed\n \"\"\"\n\n @property\n def author(self) -> discord.Member:\n ...\n\n @property\n def channel(self) -> discord.TextChannel:\n ...\n\n @property\n def guild(self) -> discord.Guild:\n ...\n\n @property\n def me(self) -> discord.Member:\n ...\n\n\nelse:\n GuildContext = Context\n DMContext = Context\n", "path": "redbot/core/commands/context.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport os\nimport re\nfrom typing import Iterable, List, Union, Optional, TYPE_CHECKING\nimport discord\nfrom discord.ext.commands import Context as DPYContext\n\nfrom .requires import PermState\nfrom ..utils.chat_formatting import box\nfrom ..utils.predicates import MessagePredicate\nfrom ..utils import common_filters\n\nif TYPE_CHECKING:\n from .commands import Command\n from ..bot import Red\n\nTICK = \"\\N{WHITE HEAVY CHECK MARK}\"\n\n__all__ = [\"Context\", \"GuildContext\", \"DMContext\"]\n\n\nclass Context(DPYContext):\n \"\"\"Command invocation context for Red.\n\n All context passed into commands will be of this type.\n\n This class inherits from `discord.ext.commands.Context`.\n\n Attributes\n ----------\n assume_yes: bool\n Whether or not interactive checks should\n be skipped and assumed to be confirmed.\n\n This is intended for allowing automation of tasks.\n\n An example of this would be scheduled commands\n not requiring interaction if the cog developer\n checks this value prior to confirming something interactively.\n\n Depending on the potential impact of a command,\n it may still be appropriate not to use this setting.\n permission_state: PermState\n The permission state the current context is in.\n \"\"\"\n\n command: \"Command\"\n invoked_subcommand: \"Optional[Command]\"\n bot: \"Red\"\n\n def __init__(self, **attrs):\n self.assume_yes = attrs.pop(\"assume_yes\", False)\n super().__init__(**attrs)\n self.permission_state: PermState = PermState.NORMAL\n\n async def send(self, content=None, **kwargs):\n \"\"\"Sends a message to the destination with the content given.\n\n This acts the same as `discord.ext.commands.Context.send`, with\n one added keyword argument as detailed below in *Other Parameters*.\n\n Parameters\n ----------\n content : str\n The content of the message to send.\n\n Other Parameters\n ----------------\n filter : callable (`str`) -> `str`, optional\n A function which is used to filter the ``content`` before\n it is sent.\n This must take a single `str` as an argument, and return\n the processed `str`. When `None` is passed, ``content`` won't be touched.\n Defaults to `None`.\n **kwargs\n See `discord.ext.commands.Context.send`.\n\n Returns\n -------\n discord.Message\n The message that was sent.\n\n \"\"\"\n\n _filter = kwargs.pop(\"filter\", None)\n\n if _filter and content:\n content = _filter(str(content))\n\n return await super().send(content=content, **kwargs)\n\n async def send_help(self, command=None):\n \"\"\" Send the command help message. 
\"\"\"\n # This allows people to manually use this similarly\n # to the upstream d.py version, while retaining our use.\n command = command or self.command\n await self.bot.send_help_for(self, command)\n\n async def tick(self) -> bool:\n \"\"\"Add a tick reaction to the command message.\n\n Returns\n -------\n bool\n :code:`True` if adding the reaction succeeded.\n\n \"\"\"\n try:\n await self.message.add_reaction(TICK)\n except discord.HTTPException:\n return False\n else:\n return True\n\n async def react_quietly(\n self, reaction: Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str]\n ) -> bool:\n \"\"\"Adds a reaction to to the command message.\n\n Returns\n -------\n bool\n :code:`True` if adding the reaction succeeded.\n \"\"\"\n try:\n await self.message.add_reaction(reaction)\n except discord.HTTPException:\n return False\n else:\n return True\n\n async def send_interactive(\n self, messages: Iterable[str], box_lang: str = None, timeout: int = 15\n ) -> List[discord.Message]:\n \"\"\"Send multiple messages interactively.\n\n The user will be prompted for whether or not they would like to view\n the next message, one at a time. They will also be notified of how\n many messages are remaining on each prompt.\n\n Parameters\n ----------\n messages : `iterable` of `str`\n The messages to send.\n box_lang : str\n If specified, each message will be contained within a codeblock of\n this language.\n timeout : int\n How long the user has to respond to the prompt before it times out.\n After timing out, the bot deletes its prompt message.\n\n \"\"\"\n messages = tuple(messages)\n ret = []\n\n for idx, page in enumerate(messages, 1):\n if box_lang is None:\n msg = await self.send(page)\n else:\n msg = await self.send(box(page, lang=box_lang))\n ret.append(msg)\n n_remaining = len(messages) - idx\n if n_remaining > 0:\n if n_remaining == 1:\n plural = \"\"\n is_are = \"is\"\n else:\n plural = \"s\"\n is_are = \"are\"\n query = await self.send(\n \"There {} still {} message{} remaining. 
\"\n \"Type `more` to continue.\"\n \"\".format(is_are, n_remaining, plural)\n )\n try:\n resp = await self.bot.wait_for(\n \"message\",\n check=MessagePredicate.lower_equal_to(\"more\", self),\n timeout=timeout,\n )\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n break\n else:\n try:\n await self.channel.delete_messages((query, resp))\n except (discord.HTTPException, AttributeError):\n # In case the bot can't delete other users' messages,\n # or is not a bot account\n # or channel is a DM\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n return ret\n\n async def embed_colour(self):\n \"\"\"\n Helper function to get the colour for an embed.\n\n Returns\n -------\n discord.Colour:\n The colour to be used\n \"\"\"\n return await self.bot.get_embed_color(self)\n\n @property\n def embed_color(self):\n # Rather than double awaiting.\n return self.embed_colour\n\n async def embed_requested(self):\n \"\"\"\n Simple helper to call bot.embed_requested\n with logic around if embed permissions are available\n\n Returns\n -------\n bool:\n :code:`True` if an embed is requested\n \"\"\"\n if self.guild and not self.channel.permissions_for(self.guild.me).embed_links:\n return False\n return await self.bot.embed_requested(self.channel, self.author, command=self.command)\n\n async def maybe_send_embed(self, message: str) -> discord.Message:\n \"\"\"\n Simple helper to send a simple message to context\n without manually checking ctx.embed_requested\n This should only be used for simple messages.\n\n Parameters\n ----------\n message: `str`\n The string to send\n\n Returns\n -------\n discord.Message:\n the message which was sent\n\n Raises\n ------\n discord.Forbidden\n see `discord.abc.Messageable.send`\n discord.HTTPException\n see `discord.abc.Messageable.send`\n ValueError\n when the message's length is not between 1 and 2000 characters.\n \"\"\"\n if not message or len(message) > 2000:\n raise ValueError(\"Message length must be between 1 and 2000\")\n if await self.embed_requested():\n return await self.send(\n embed=discord.Embed(description=message, color=(await self.embed_colour()))\n )\n else:\n return await self.send(\n message,\n allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False),\n )\n\n @property\n def clean_prefix(self) -> str:\n \"\"\"\n str: The command prefix, but with a sanitized version of the bot's mention if it was used as prefix.\n This can be used in a context where discord user mentions might not render properly.\n \"\"\"\n me = self.me\n pattern = re.compile(rf\"<@!?{me.id}>\")\n return pattern.sub(f\"@{me.display_name}\".replace(\"\\\\\", r\"\\\\\"), self.prefix)\n\n @property\n def me(self) -> Union[discord.ClientUser, discord.Member]:\n \"\"\"\n discord.abc.User: The bot member or user object.\n\n If the context is DM, this will be a `discord.User` object.\n \"\"\"\n if self.guild is not None:\n return self.guild.me\n else:\n return self.bot.user\n\n\nif TYPE_CHECKING or os.getenv(\"BUILDING_DOCS\", False):\n\n class DMContext(Context):\n \"\"\"\n At runtime, this will still be a normal context object.\n\n This lies about some type narrowing for type analysis in commands\n using a dm_only decorator.\n\n It is only correct to use when those types are already narrowed\n \"\"\"\n\n @property\n def author(self) -> discord.User:\n ...\n\n @property\n def channel(self) -> discord.DMChannel:\n ...\n\n @property\n def guild(self) -> None:\n ...\n\n @property\n def 
me(self) -> discord.ClientUser:\n ...\n\n class GuildContext(Context):\n \"\"\"\n At runtime, this will still be a normal context object.\n\n This lies about some type narrowing for type analysis in commands\n using a guild_only decorator.\n\n It is only correct to use when those types are already narrowed\n \"\"\"\n\n @property\n def author(self) -> discord.Member:\n ...\n\n @property\n def channel(self) -> discord.TextChannel:\n ...\n\n @property\n def guild(self) -> discord.Guild:\n ...\n\n @property\n def me(self) -> discord.Member:\n ...\n\n\nelse:\n GuildContext = Context\n DMContext = Context\n", "path": "redbot/core/commands/context.py"}]} | 3,452 | 170 |
gh_patches_debug_34545 | rasdani/github-patches | git_diff | feast-dev__feast-1771 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FeatureService should support "description" as a first-class named-value argument, as Entity does
## Expected Behavior
...
```
# Define your feature service and the features it will serve
driver_fs = FeatureService(name="driver_ranking_fv_svc",
features=[driver_hourly_stats_view],
                           description="Used for training an ElasticNetCV model")
```
```
feast feature-services describe driver_ranking_fv_svc
spec:
name: driver_ranking_fv_svc
features:
- featureViewName: driver_hourly_stats
featureColumns:
- name: conv_rate
valueType: FLOAT
- name: acc_rate
valueType: FLOAT
- name: avg_daily_trips
valueType: INT64
description: Used for training an ElasticNetCV model
meta: {}
```
Extend the argument list in the [FeatureService](https://github.com/feast-dev/feast/blob/7dff49a194a25a62927c1ee7022caf0651f68f38/sdk/python/feast/feature_service.py#L24) to support `description=None` as the default.
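A minimal sketch of what the extended constructor could look like, purely as an illustration of the request above (parameter names and defaults are assumptions, not the project's final implementation; it also skips the normalization of features into projections that the real class performs):
```
from typing import Dict, List, Optional, Union

from feast.feature_table import FeatureTable
from feast.feature_view import FeatureView
from feast.feature_view_projection import FeatureViewProjection


class FeatureService:
    def __init__(
        self,
        name: str,
        features: List[Union[FeatureTable, FeatureView, FeatureViewProjection]],
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,  # proposed first-class argument
    ) -> None:
        self.name = name
        # The real implementation converts these into FeatureViewProjection objects.
        self.features = list(features)
        self.tags = tags or {}
        self.description = description
```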
## Current Behavior
```
# Define your feature service and the features it will serve
driver_fs = FeatureService(name="driver_ranking_fv_svc",
features=[driver_hourly_stats_view],
tags={"description": "Used for training an ElasticNetCV model"})
```
Then query the FS from the cli
```
feast feature-services describe driver_ranking_fv_svc
spec:
name: driver_ranking_fv_svc
features:
- featureViewName: driver_hourly_stats
featureColumns:
- name: conv_rate
valueType: FLOAT
- name: acc_rate
valueType: FLOAT
- name: avg_daily_trips
valueType: INT64
tags:
description: Used for training an ElasticNetCV model
meta: {}
```
## Steps to reproduce
Follow the steps above in the current behavior
### Specifications
- Version: v0.12
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/feature_service.py`
Content:
```
1 from datetime import datetime
2 from typing import Dict, List, Optional, Union
3
4 from google.protobuf.json_format import MessageToJson
5
6 from feast.feature_table import FeatureTable
7 from feast.feature_view import FeatureView
8 from feast.feature_view_projection import FeatureViewProjection
9 from feast.protos.feast.core.FeatureService_pb2 import (
10 FeatureService as FeatureServiceProto,
11 )
12 from feast.protos.feast.core.FeatureService_pb2 import (
13 FeatureServiceMeta,
14 FeatureServiceSpec,
15 )
16
17
18 class FeatureService:
19 """
20 A feature service is a logical grouping of features for retrieval (training or serving).
21 The features grouped by a feature service may come from any number of feature views.
22
23 Args:
24 name: Unique name of the feature service.
25 features: A list of Features that are grouped as part of this FeatureService.
26 The list may contain Feature Views, Feature Tables, or a subset of either.
27 tags (optional): A dictionary of key-value pairs used for organizing Feature
28 Services.
29 """
30
31 name: str
32 features: List[FeatureViewProjection]
33 tags: Dict[str, str]
34 created_timestamp: Optional[datetime] = None
35 last_updated_timestamp: Optional[datetime] = None
36
37 def __init__(
38 self,
39 name: str,
40 features: List[Union[FeatureTable, FeatureView, FeatureViewProjection]],
41 tags: Optional[Dict[str, str]] = None,
42 ):
43 """
44 Creates a FeatureService object.
45
46 Raises:
47 ValueError: If one of the specified features is not a valid type.
48 """
49 self.name = name
50 self.features = []
51 for feature in features:
52 if isinstance(feature, FeatureTable) or isinstance(feature, FeatureView):
53 self.features.append(FeatureViewProjection.from_definition(feature))
54 elif isinstance(feature, FeatureViewProjection):
55 self.features.append(feature)
56 else:
57 raise ValueError(f"Unexpected type: {type(feature)}")
58 self.tags = tags or {}
59 self.created_timestamp = None
60 self.last_updated_timestamp = None
61
62 def __repr__(self):
63 items = (f"{k} = {v}" for k, v in self.__dict__.items())
64 return f"<{self.__class__.__name__}({', '.join(items)})>"
65
66 def __str__(self):
67 return str(MessageToJson(self.to_proto()))
68
69 def __hash__(self):
70 return hash(self.name)
71
72 def __eq__(self, other):
73 if not isinstance(other, FeatureService):
74 raise TypeError(
75 "Comparisons should only involve FeatureService class objects."
76 )
77 if self.tags != other.tags or self.name != other.name:
78 return False
79
80 if sorted(self.features) != sorted(other.features):
81 return False
82
83 return True
84
85 @staticmethod
86 def from_proto(feature_service_proto: FeatureServiceProto):
87 """
88 Converts a FeatureServiceProto to a FeatureService object.
89
90 Args:
91 feature_service_proto: A protobuf representation of a FeatureService.
92 """
93 fs = FeatureService(
94 name=feature_service_proto.spec.name,
95 features=[
96 FeatureViewProjection.from_proto(fp)
97 for fp in feature_service_proto.spec.features
98 ],
99 tags=dict(feature_service_proto.spec.tags),
100 )
101
102 if feature_service_proto.meta.HasField("created_timestamp"):
103 fs.created_timestamp = (
104 feature_service_proto.meta.created_timestamp.ToDatetime()
105 )
106 if feature_service_proto.meta.HasField("last_updated_timestamp"):
107 fs.last_updated_timestamp = (
108 feature_service_proto.meta.last_updated_timestamp.ToDatetime()
109 )
110
111 return fs
112
113 def to_proto(self) -> FeatureServiceProto:
114 """
115 Converts a FeatureService to its protobuf representation.
116
117 Returns:
118 A FeatureServiceProto protobuf.
119 """
120 meta = FeatureServiceMeta()
121 if self.created_timestamp:
122 meta.created_timestamp.FromDatetime(self.created_timestamp)
123
124 spec = FeatureServiceSpec()
125 spec.name = self.name
126 for definition in self.features:
127 if isinstance(definition, FeatureTable) or isinstance(
128 definition, FeatureView
129 ):
130 feature_ref = FeatureViewProjection(
131 definition.name, definition.features
132 )
133 else:
134 feature_ref = definition
135
136 spec.features.append(feature_ref.to_proto())
137
138 if self.tags:
139 spec.tags.update(self.tags)
140
141 feature_service_proto = FeatureServiceProto(spec=spec, meta=meta)
142 return feature_service_proto
143
144 def validate(self):
145 pass
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/feature_service.py b/sdk/python/feast/feature_service.py
--- a/sdk/python/feast/feature_service.py
+++ b/sdk/python/feast/feature_service.py
@@ -31,6 +31,7 @@
name: str
features: List[FeatureViewProjection]
tags: Dict[str, str]
+ description: Optional[str] = None
created_timestamp: Optional[datetime] = None
last_updated_timestamp: Optional[datetime] = None
@@ -39,6 +40,7 @@
name: str,
features: List[Union[FeatureTable, FeatureView, FeatureViewProjection]],
tags: Optional[Dict[str, str]] = None,
+ description: Optional[str] = None,
):
"""
Creates a FeatureService object.
@@ -56,6 +58,7 @@
else:
raise ValueError(f"Unexpected type: {type(feature)}")
self.tags = tags or {}
+ self.description = description
self.created_timestamp = None
self.last_updated_timestamp = None
@@ -97,6 +100,11 @@
for fp in feature_service_proto.spec.features
],
tags=dict(feature_service_proto.spec.tags),
+ description=(
+ feature_service_proto.spec.description
+ if feature_service_proto.spec.description != ""
+ else None
+ ),
)
if feature_service_proto.meta.HasField("created_timestamp"):
@@ -137,6 +145,8 @@
if self.tags:
spec.tags.update(self.tags)
+ if self.description:
+ spec.description = self.description
feature_service_proto = FeatureServiceProto(spec=spec, meta=meta)
return feature_service_proto
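A usage sketch of the patched behaviour, assuming the `FeatureServiceSpec` protobuf message also carries a `description` field and that `driver_hourly_stats_view` is the FeatureView from the issue (both are assumptions, not shown in this diff):
```
fs = FeatureService(
    name="driver_ranking_fv_svc",
    features=[driver_hourly_stats_view],  # FeatureView assumed defined as in the issue
    description="Used for training an ElasticNetCV model",
)
proto = fs.to_proto()
assert proto.spec.description == "Used for training an ElasticNetCV model"
assert FeatureService.from_proto(proto).description == fs.description
```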
| {"golden_diff": "diff --git a/sdk/python/feast/feature_service.py b/sdk/python/feast/feature_service.py\n--- a/sdk/python/feast/feature_service.py\n+++ b/sdk/python/feast/feature_service.py\n@@ -31,6 +31,7 @@\n name: str\n features: List[FeatureViewProjection]\n tags: Dict[str, str]\n+ description: Optional[str] = None\n created_timestamp: Optional[datetime] = None\n last_updated_timestamp: Optional[datetime] = None\n \n@@ -39,6 +40,7 @@\n name: str,\n features: List[Union[FeatureTable, FeatureView, FeatureViewProjection]],\n tags: Optional[Dict[str, str]] = None,\n+ description: Optional[str] = None,\n ):\n \"\"\"\n Creates a FeatureService object.\n@@ -56,6 +58,7 @@\n else:\n raise ValueError(f\"Unexpected type: {type(feature)}\")\n self.tags = tags or {}\n+ self.description = description\n self.created_timestamp = None\n self.last_updated_timestamp = None\n \n@@ -97,6 +100,11 @@\n for fp in feature_service_proto.spec.features\n ],\n tags=dict(feature_service_proto.spec.tags),\n+ description=(\n+ feature_service_proto.spec.description\n+ if feature_service_proto.spec.description != \"\"\n+ else None\n+ ),\n )\n \n if feature_service_proto.meta.HasField(\"created_timestamp\"):\n@@ -137,6 +145,8 @@\n \n if self.tags:\n spec.tags.update(self.tags)\n+ if self.description:\n+ spec.description = self.description\n \n feature_service_proto = FeatureServiceProto(spec=spec, meta=meta)\n return feature_service_proto\n", "issue": "FeatureService should support \"description\" as first class named-value argument as Entity does\n## Expected Behavior \r\n...\r\n```\r\n# Define your feature service and the features it will serve\r\ndriver_fs = FeatureService(name=\"driver_ranking_fv_svc\",\r\n features=[driver_hourly_stats_view],\r\n description=\"Used for training an ElasticNetCV model\"})\r\n```\r\n```\r\nfeast feature-services describe driver_ranking_fv_svc\r\nspec:\r\n name: driver_ranking_fv_svc\r\n features:\r\n - featureViewName: driver_hourly_stats\r\n featureColumns:\r\n - name: conv_rate\r\n valueType: FLOAT\r\n - name: acc_rate\r\n valueType: FLOAT\r\n - name: avg_daily_trips\r\n valueType: INT64\r\ndescription: Used for training an ElasticNetCV model\r\nmeta: {}\r\n```\r\nExtend the argument list in the [FeatureService ](https://github.com/feast-dev/feast/blob/7dff49a194a25a62927c1ee7022caf0651f68f38/sdk/python/feast/feature_service.py#L24)to support `description=None` as default\r\n## Current Behavior\r\n\r\n```\r\n# Define your feature service and the features it will serve\r\ndriver_fs = FeatureService(name=\"driver_ranking_fv_svc\",\r\n features=[driver_hourly_stats_view],\r\n tags={\"description\": \"Used for training an ElasticNetCV model\"})\r\n```\r\nThen query the FS from the cli\r\n\r\n```\r\nfeast feature-services describe driver_ranking_fv_svc\r\nspec:\r\n name: driver_ranking_fv_svc\r\n features:\r\n - featureViewName: driver_hourly_stats\r\n featureColumns:\r\n - name: conv_rate\r\n valueType: FLOAT\r\n - name: acc_rate\r\n valueType: FLOAT\r\n - name: avg_daily_trips\r\n valueType: INT64\r\n tags:\r\n description: Used for training an ElasticNetCV model\r\nmeta: {}\r\n```\r\n## Steps to reproduce\r\n\r\nFollow the steps above in the current behavior\r\n\r\n### Specifications\r\n\r\n- Version: v0.12v\r\n\n", "before_files": [{"content": "from datetime import datetime\nfrom typing import Dict, List, Optional, Union\n\nfrom google.protobuf.json_format import MessageToJson\n\nfrom feast.feature_table import FeatureTable\nfrom feast.feature_view import FeatureView\nfrom 
feast.feature_view_projection import FeatureViewProjection\nfrom feast.protos.feast.core.FeatureService_pb2 import (\n FeatureService as FeatureServiceProto,\n)\nfrom feast.protos.feast.core.FeatureService_pb2 import (\n FeatureServiceMeta,\n FeatureServiceSpec,\n)\n\n\nclass FeatureService:\n \"\"\"\n A feature service is a logical grouping of features for retrieval (training or serving).\n The features grouped by a feature service may come from any number of feature views.\n\n Args:\n name: Unique name of the feature service.\n features: A list of Features that are grouped as part of this FeatureService.\n The list may contain Feature Views, Feature Tables, or a subset of either.\n tags (optional): A dictionary of key-value pairs used for organizing Feature\n Services.\n \"\"\"\n\n name: str\n features: List[FeatureViewProjection]\n tags: Dict[str, str]\n created_timestamp: Optional[datetime] = None\n last_updated_timestamp: Optional[datetime] = None\n\n def __init__(\n self,\n name: str,\n features: List[Union[FeatureTable, FeatureView, FeatureViewProjection]],\n tags: Optional[Dict[str, str]] = None,\n ):\n \"\"\"\n Creates a FeatureService object.\n\n Raises:\n ValueError: If one of the specified features is not a valid type.\n \"\"\"\n self.name = name\n self.features = []\n for feature in features:\n if isinstance(feature, FeatureTable) or isinstance(feature, FeatureView):\n self.features.append(FeatureViewProjection.from_definition(feature))\n elif isinstance(feature, FeatureViewProjection):\n self.features.append(feature)\n else:\n raise ValueError(f\"Unexpected type: {type(feature)}\")\n self.tags = tags or {}\n self.created_timestamp = None\n self.last_updated_timestamp = None\n\n def __repr__(self):\n items = (f\"{k} = {v}\" for k, v in self.__dict__.items())\n return f\"<{self.__class__.__name__}({', '.join(items)})>\"\n\n def __str__(self):\n return str(MessageToJson(self.to_proto()))\n\n def __hash__(self):\n return hash(self.name)\n\n def __eq__(self, other):\n if not isinstance(other, FeatureService):\n raise TypeError(\n \"Comparisons should only involve FeatureService class objects.\"\n )\n if self.tags != other.tags or self.name != other.name:\n return False\n\n if sorted(self.features) != sorted(other.features):\n return False\n\n return True\n\n @staticmethod\n def from_proto(feature_service_proto: FeatureServiceProto):\n \"\"\"\n Converts a FeatureServiceProto to a FeatureService object.\n\n Args:\n feature_service_proto: A protobuf representation of a FeatureService.\n \"\"\"\n fs = FeatureService(\n name=feature_service_proto.spec.name,\n features=[\n FeatureViewProjection.from_proto(fp)\n for fp in feature_service_proto.spec.features\n ],\n tags=dict(feature_service_proto.spec.tags),\n )\n\n if feature_service_proto.meta.HasField(\"created_timestamp\"):\n fs.created_timestamp = (\n feature_service_proto.meta.created_timestamp.ToDatetime()\n )\n if feature_service_proto.meta.HasField(\"last_updated_timestamp\"):\n fs.last_updated_timestamp = (\n feature_service_proto.meta.last_updated_timestamp.ToDatetime()\n )\n\n return fs\n\n def to_proto(self) -> FeatureServiceProto:\n \"\"\"\n Converts a FeatureService to its protobuf representation.\n\n Returns:\n A FeatureServiceProto protobuf.\n \"\"\"\n meta = FeatureServiceMeta()\n if self.created_timestamp:\n meta.created_timestamp.FromDatetime(self.created_timestamp)\n\n spec = FeatureServiceSpec()\n spec.name = self.name\n for definition in self.features:\n if isinstance(definition, FeatureTable) or isinstance(\n 
definition, FeatureView\n ):\n feature_ref = FeatureViewProjection(\n definition.name, definition.features\n )\n else:\n feature_ref = definition\n\n spec.features.append(feature_ref.to_proto())\n\n if self.tags:\n spec.tags.update(self.tags)\n\n feature_service_proto = FeatureServiceProto(spec=spec, meta=meta)\n return feature_service_proto\n\n def validate(self):\n pass\n", "path": "sdk/python/feast/feature_service.py"}], "after_files": [{"content": "from datetime import datetime\nfrom typing import Dict, List, Optional, Union\n\nfrom google.protobuf.json_format import MessageToJson\n\nfrom feast.feature_table import FeatureTable\nfrom feast.feature_view import FeatureView\nfrom feast.feature_view_projection import FeatureViewProjection\nfrom feast.protos.feast.core.FeatureService_pb2 import (\n FeatureService as FeatureServiceProto,\n)\nfrom feast.protos.feast.core.FeatureService_pb2 import (\n FeatureServiceMeta,\n FeatureServiceSpec,\n)\n\n\nclass FeatureService:\n \"\"\"\n A feature service is a logical grouping of features for retrieval (training or serving).\n The features grouped by a feature service may come from any number of feature views.\n\n Args:\n name: Unique name of the feature service.\n features: A list of Features that are grouped as part of this FeatureService.\n The list may contain Feature Views, Feature Tables, or a subset of either.\n tags (optional): A dictionary of key-value pairs used for organizing Feature\n Services.\n \"\"\"\n\n name: str\n features: List[FeatureViewProjection]\n tags: Dict[str, str]\n description: Optional[str] = None\n created_timestamp: Optional[datetime] = None\n last_updated_timestamp: Optional[datetime] = None\n\n def __init__(\n self,\n name: str,\n features: List[Union[FeatureTable, FeatureView, FeatureViewProjection]],\n tags: Optional[Dict[str, str]] = None,\n description: Optional[str] = None,\n ):\n \"\"\"\n Creates a FeatureService object.\n\n Raises:\n ValueError: If one of the specified features is not a valid type.\n \"\"\"\n self.name = name\n self.features = []\n for feature in features:\n if isinstance(feature, FeatureTable) or isinstance(feature, FeatureView):\n self.features.append(FeatureViewProjection.from_definition(feature))\n elif isinstance(feature, FeatureViewProjection):\n self.features.append(feature)\n else:\n raise ValueError(f\"Unexpected type: {type(feature)}\")\n self.tags = tags or {}\n self.description = description\n self.created_timestamp = None\n self.last_updated_timestamp = None\n\n def __repr__(self):\n items = (f\"{k} = {v}\" for k, v in self.__dict__.items())\n return f\"<{self.__class__.__name__}({', '.join(items)})>\"\n\n def __str__(self):\n return str(MessageToJson(self.to_proto()))\n\n def __hash__(self):\n return hash(self.name)\n\n def __eq__(self, other):\n if not isinstance(other, FeatureService):\n raise TypeError(\n \"Comparisons should only involve FeatureService class objects.\"\n )\n if self.tags != other.tags or self.name != other.name:\n return False\n\n if sorted(self.features) != sorted(other.features):\n return False\n\n return True\n\n @staticmethod\n def from_proto(feature_service_proto: FeatureServiceProto):\n \"\"\"\n Converts a FeatureServiceProto to a FeatureService object.\n\n Args:\n feature_service_proto: A protobuf representation of a FeatureService.\n \"\"\"\n fs = FeatureService(\n name=feature_service_proto.spec.name,\n features=[\n FeatureViewProjection.from_proto(fp)\n for fp in feature_service_proto.spec.features\n ],\n 
tags=dict(feature_service_proto.spec.tags),\n description=(\n feature_service_proto.spec.description\n if feature_service_proto.spec.description != \"\"\n else None\n ),\n )\n\n if feature_service_proto.meta.HasField(\"created_timestamp\"):\n fs.created_timestamp = (\n feature_service_proto.meta.created_timestamp.ToDatetime()\n )\n if feature_service_proto.meta.HasField(\"last_updated_timestamp\"):\n fs.last_updated_timestamp = (\n feature_service_proto.meta.last_updated_timestamp.ToDatetime()\n )\n\n return fs\n\n def to_proto(self) -> FeatureServiceProto:\n \"\"\"\n Converts a FeatureService to its protobuf representation.\n\n Returns:\n A FeatureServiceProto protobuf.\n \"\"\"\n meta = FeatureServiceMeta()\n if self.created_timestamp:\n meta.created_timestamp.FromDatetime(self.created_timestamp)\n\n spec = FeatureServiceSpec()\n spec.name = self.name\n for definition in self.features:\n if isinstance(definition, FeatureTable) or isinstance(\n definition, FeatureView\n ):\n feature_ref = FeatureViewProjection(\n definition.name, definition.features\n )\n else:\n feature_ref = definition\n\n spec.features.append(feature_ref.to_proto())\n\n if self.tags:\n spec.tags.update(self.tags)\n if self.description:\n spec.description = self.description\n\n feature_service_proto = FeatureServiceProto(spec=spec, meta=meta)\n return feature_service_proto\n\n def validate(self):\n pass\n", "path": "sdk/python/feast/feature_service.py"}]} | 1,985 | 383 |
gh_patches_debug_5895 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1576 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flask style test failing health check in CI
Flask's style test in GitHub Actions runs `tox -e style`, which runs `pre-commit run --all-files --show-diff-on-failure`, which fails with "An unexpected error has occurred: AssertionError: BUG: expected environment for python to be healthy() immediately after install, please open an issue describing your environment".
Note, this does not reproduce locally.
AFAIK, this first appeared in pallets/flask#3739.
We're trying to debug this over in pallets/flask#3740 where I've temporarily added a `cat .../pre-commit.log` command. Output here: https://github.com/pallets/flask/pull/3740/checks?check_run_id=1018335736#step:9:14
Neither the error message nor the log actually say what specifically about the environment is unhealthy.
Any ideas?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/python.py`
Content:
```
1 import contextlib
2 import functools
3 import os
4 import sys
5 from typing import Dict
6 from typing import Generator
7 from typing import Optional
8 from typing import Sequence
9 from typing import Tuple
10
11 import pre_commit.constants as C
12 from pre_commit.envcontext import envcontext
13 from pre_commit.envcontext import PatchesT
14 from pre_commit.envcontext import UNSET
15 from pre_commit.envcontext import Var
16 from pre_commit.hook import Hook
17 from pre_commit.languages import helpers
18 from pre_commit.parse_shebang import find_executable
19 from pre_commit.prefix import Prefix
20 from pre_commit.util import CalledProcessError
21 from pre_commit.util import clean_path_on_failure
22 from pre_commit.util import cmd_output
23 from pre_commit.util import cmd_output_b
24
25 ENVIRONMENT_DIR = 'py_env'
26
27
28 @functools.lru_cache(maxsize=None)
29 def _version_info(exe: str) -> str:
30 prog = 'import sys;print(".".join(str(p) for p in sys.version_info))'
31 try:
32 return cmd_output(exe, '-S', '-c', prog)[1].strip()
33 except CalledProcessError:
34 return f'<<error retrieving version from {exe}>>'
35
36
37 def _read_pyvenv_cfg(filename: str) -> Dict[str, str]:
38 ret = {}
39 with open(filename) as f:
40 for line in f:
41 try:
42 k, v = line.split('=')
43 except ValueError: # blank line / comment / etc.
44 continue
45 else:
46 ret[k.strip()] = v.strip()
47 return ret
48
49
50 def bin_dir(venv: str) -> str:
51 """On windows there's a different directory for the virtualenv"""
52 bin_part = 'Scripts' if os.name == 'nt' else 'bin'
53 return os.path.join(venv, bin_part)
54
55
56 def get_env_patch(venv: str) -> PatchesT:
57 return (
58 ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),
59 ('PYTHONHOME', UNSET),
60 ('VIRTUAL_ENV', venv),
61 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
62 )
63
64
65 def _find_by_py_launcher(
66 version: str,
67 ) -> Optional[str]: # pragma: no cover (windows only)
68 if version.startswith('python'):
69 num = version[len('python'):]
70 cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')
71 env = dict(os.environ, PYTHONIOENCODING='UTF-8')
72 try:
73 return cmd_output(*cmd, env=env)[1].strip()
74 except CalledProcessError:
75 pass
76 return None
77
78
79 def _find_by_sys_executable() -> Optional[str]:
80 def _norm(path: str) -> Optional[str]:
81 _, exe = os.path.split(path.lower())
82 exe, _, _ = exe.partition('.exe')
83 if exe not in {'python', 'pythonw'} and find_executable(exe):
84 return exe
85 return None
86
87 # On linux, I see these common sys.executables:
88 #
89 # system `python`: /usr/bin/python -> python2.7
90 # system `python2`: /usr/bin/python2 -> python2.7
91 # virtualenv v: v/bin/python (will not return from this loop)
92 # virtualenv v -ppython2: v/bin/python -> python2
93 # virtualenv v -ppython2.7: v/bin/python -> python2.7
94 # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
95 for path in (sys.executable, os.path.realpath(sys.executable)):
96 exe = _norm(path)
97 if exe:
98 return exe
99 return None
100
101
102 @functools.lru_cache(maxsize=1)
103 def get_default_version() -> str: # pragma: no cover (platform dependent)
104 # First attempt from `sys.executable` (or the realpath)
105 exe = _find_by_sys_executable()
106 if exe:
107 return exe
108
109 # Next try the `pythonX.X` executable
110 exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'
111 if find_executable(exe):
112 return exe
113
114 if _find_by_py_launcher(exe):
115 return exe
116
117 # Give a best-effort try for windows
118 default_folder_name = exe.replace('.', '')
119 if os.path.exists(fr'C:\{default_folder_name}\python.exe'):
120 return exe
121
122 # We tried!
123 return C.DEFAULT
124
125
126 def _sys_executable_matches(version: str) -> bool:
127 if version == 'python':
128 return True
129 elif not version.startswith('python'):
130 return False
131
132 try:
133 info = tuple(int(p) for p in version[len('python'):].split('.'))
134 except ValueError:
135 return False
136
137 return sys.version_info[:len(info)] == info
138
139
140 def norm_version(version: str) -> str:
141 if version == C.DEFAULT:
142 return os.path.realpath(sys.executable)
143
144 # first see if our current executable is appropriate
145 if _sys_executable_matches(version):
146 return sys.executable
147
148 if os.name == 'nt': # pragma: no cover (windows)
149 version_exec = _find_by_py_launcher(version)
150 if version_exec:
151 return version_exec
152
153 # Try looking up by name
154 version_exec = find_executable(version)
155 if version_exec and version_exec != version:
156 return version_exec
157
158 # If it is in the form pythonx.x search in the default
159 # place on windows
160 if version.startswith('python'):
161 default_folder_name = version.replace('.', '')
162 return fr'C:\{default_folder_name}\python.exe'
163
164 # Otherwise assume it is a path
165 return os.path.expanduser(version)
166
167
168 @contextlib.contextmanager
169 def in_env(
170 prefix: Prefix,
171 language_version: str,
172 ) -> Generator[None, None, None]:
173 directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)
174 envdir = prefix.path(directory)
175 with envcontext(get_env_patch(envdir)):
176 yield
177
178
179 def healthy(prefix: Prefix, language_version: str) -> bool:
180 directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)
181 envdir = prefix.path(directory)
182 pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')
183
184 # created with "old" virtualenv
185 if not os.path.exists(pyvenv_cfg):
186 return False
187
188 exe_name = 'python.exe' if sys.platform == 'win32' else 'python'
189 py_exe = prefix.path(bin_dir(envdir), exe_name)
190 cfg = _read_pyvenv_cfg(pyvenv_cfg)
191
192 return (
193 'version_info' in cfg and
194 _version_info(py_exe) == cfg['version_info'] and (
195 'base-executable' not in cfg or
196 _version_info(cfg['base-executable']) == cfg['version_info']
197 )
198 )
199
200
201 def install_environment(
202 prefix: Prefix,
203 version: str,
204 additional_dependencies: Sequence[str],
205 ) -> None:
206 envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
207 python = norm_version(version)
208 venv_cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)
209 install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)
210
211 with clean_path_on_failure(envdir):
212 cmd_output_b(*venv_cmd, cwd='/')
213 with in_env(prefix, version):
214 helpers.run_setup_cmd(prefix, install_cmd)
215
216
217 def run_hook(
218 hook: Hook,
219 file_args: Sequence[str],
220 color: bool,
221 ) -> Tuple[int, bytes]:
222 with in_env(hook.prefix, hook.language_version):
223 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -191,7 +191,8 @@
return (
'version_info' in cfg and
- _version_info(py_exe) == cfg['version_info'] and (
+ # always use uncached lookup here in case we replaced an unhealthy env
+ _version_info.__wrapped__(py_exe) == cfg['version_info'] and (
'base-executable' not in cfg or
_version_info(cfg['base-executable']) == cfg['version_info']
)
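Some context on why bypassing the cache fixes the health check: `functools.lru_cache` keeps serving the memoized version string for a given interpreter path even after the virtualenv behind that path has been replaced, and the wrapper's `__wrapped__` attribute is the standard way to reach the undecorated function. A small standalone illustration (not code from the pre-commit repository):
```
import functools

calls = 0

def expensive_probe(key):
    """Stand-in for probing a Python executable's version on disk."""
    global calls
    calls += 1
    return calls

@functools.lru_cache(maxsize=None)
def lookup(key):
    return expensive_probe(key)

print(lookup("py_env"))              # 1 -- computed and cached
print(lookup("py_env"))              # 1 -- stale answer served from the cache
print(lookup.__wrapped__("py_env"))  # 2 -- bypasses the cache and probes again
```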
| {"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -191,7 +191,8 @@\n \n return (\n 'version_info' in cfg and\n- _version_info(py_exe) == cfg['version_info'] and (\n+ # always use uncached lookup here in case we replaced an unhealthy env\n+ _version_info.__wrapped__(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n", "issue": "Flask style test failing health check in CI\nFlask's style test in GitHub actions runs `tox -e style`, which runs `pre-commit run --all-files --show-diff-on-failure`, which is failing with \"An unexpected error has occurred: AssertionError: BUG: expected environment for python to be healthy() immediately after install, please open an issue describing your environment\".\r\n\r\nNote, this does not reproduce locally.\r\n\r\nAFAIK, this first appeared in pallets/flask#3739.\r\n\r\nWe're trying to debug this over in pallets/flask#3740 where I've temporarily added a `cat .../pre-commit.log` command. Output here: https://github.com/pallets/flask/pull/3740/checks?check_run_id=1018335736#step:9:14\r\n\r\nNeither the error message nor the log actually say what specifically about the environment is unhealthy.\r\n \r\nAny ideas?\r\n\r\nThanks!\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\[email protected]_cache(maxsize=None)\ndef _version_info(exe: str) -> str:\n prog = 'import sys;print(\".\".join(str(p) for p in sys.version_info))'\n try:\n return cmd_output(exe, '-S', '-c', prog)[1].strip()\n except CalledProcessError:\n return f'<<error retrieving version from {exe}>>'\n\n\ndef _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n with open(filename) as f:\n for line in f:\n try:\n k, v = line.split('=')\n except ValueError: # blank line / comment / etc.\n continue\n else:\n ret[k.strip()] = v.strip()\n return ret\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n env = dict(os.environ, PYTHONIOENCODING='UTF-8')\n try:\n return cmd_output(*cmd, env=env)[1].strip()\n except CalledProcessError:\n pass\n 
return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n default_folder_name = exe.replace('.', '')\n if os.path.exists(fr'C:\\{default_folder_name}\\python.exe'):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> str:\n if version == C.DEFAULT:\n return os.path.realpath(sys.executable)\n\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n default_folder_name = version.replace('.', '')\n return fr'C:\\{default_folder_name}\\python.exe'\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef healthy(prefix: Prefix, language_version: str) -> bool:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')\n\n # created with \"old\" virtualenv\n if not os.path.exists(pyvenv_cfg):\n return False\n\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n cfg = _read_pyvenv_cfg(pyvenv_cfg)\n\n return (\n 'version_info' in cfg and\n _version_info(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n )\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) 
-> None:\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n python = norm_version(version)\n venv_cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n with clean_path_on_failure(envdir):\n cmd_output_b(*venv_cmd, cwd='/')\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\[email protected]_cache(maxsize=None)\ndef _version_info(exe: str) -> str:\n prog = 'import sys;print(\".\".join(str(p) for p in sys.version_info))'\n try:\n return cmd_output(exe, '-S', '-c', prog)[1].strip()\n except CalledProcessError:\n return f'<<error retrieving version from {exe}>>'\n\n\ndef _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n with open(filename) as f:\n for line in f:\n try:\n k, v = line.split('=')\n except ValueError: # blank line / comment / etc.\n continue\n else:\n ret[k.strip()] = v.strip()\n return ret\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n env = dict(os.environ, PYTHONIOENCODING='UTF-8')\n try:\n return cmd_output(*cmd, env=env)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, 
os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n default_folder_name = exe.replace('.', '')\n if os.path.exists(fr'C:\\{default_folder_name}\\python.exe'):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> str:\n if version == C.DEFAULT:\n return os.path.realpath(sys.executable)\n\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n default_folder_name = version.replace('.', '')\n return fr'C:\\{default_folder_name}\\python.exe'\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef healthy(prefix: Prefix, language_version: str) -> bool:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')\n\n # created with \"old\" virtualenv\n if not os.path.exists(pyvenv_cfg):\n return False\n\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n cfg = _read_pyvenv_cfg(pyvenv_cfg)\n\n return (\n 'version_info' in cfg and\n # always use uncached lookup here in case we replaced an unhealthy env\n _version_info.__wrapped__(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n )\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n python = norm_version(version)\n venv_cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n with clean_path_on_failure(envdir):\n cmd_output_b(*venv_cmd, cwd='/')\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, 
hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/python.py"}]} | 2,747 | 144 |
gh_patches_debug_7250 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to use upstream proxy and proxy authentication
##### Steps to reproduce the problem:
1. git clone https://github.com/mitmproxy/mitmproxy.git
2. cd mitmproxy
3. ./dev.sh
4. . venv/bin/activate
5. mitmdump --proxyauth test:test -m upstream:http://10.7.0.225:8080/ --ssl-insecure
On 10.7.0.225, I have a VM that runs mitmproxy:
6. mitmdump --insecure
##### Any other comments? What have you tried so far?
It seems that each time there is a clientdisconnect/clientconnect, you get a new prompt to log in (proxyauth). Basically, for each domain you have to log in to the proxy auth again, here with test/test.
My wild guess would be that the header information used to pass through the upstream proxy is somehow not compatible with the auth header information.
##### System information
HOST :
```
Mitmproxy version: 3.0.0 (2.0.0dev0435-0x1c6b33f)
Python version: 3.6.0
Platform: Linux-4.9.0-0.bpo.2-amd64-x86_64-with-debian-8.7
SSL version: OpenSSL 1.1.0e 16 Feb 2017
Linux distro: debian 8.7
```
VM (10.7.0.225):
```
Mitmproxy version : 2.0.0 (release version)
Python version : 3.5.2
Platform : Linux-4.4.0-75-generic-x86_64-with-Ubuntu-16.04-xenial
SSL version: OpenSSL 1.0.2g 1 Mar 2016
Linux distro: Ubuntu 16.04 xenial
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/proxyauth.py`
Content:
```
1 import binascii
2 import weakref
3 import ldap3
4 from typing import Optional
5 from typing import MutableMapping # noqa
6 from typing import Tuple
7
8 import passlib.apache
9
10 import mitmproxy.net.http
11 from mitmproxy import connections # noqa
12 from mitmproxy import exceptions
13 from mitmproxy import http
14 from mitmproxy import ctx
15 from mitmproxy.net.http import status_codes
16
17 REALM = "mitmproxy"
18
19
20 def mkauth(username: str, password: str, scheme: str = "basic") -> str:
21 """
22 Craft a basic auth string
23 """
24 v = binascii.b2a_base64(
25 (username + ":" + password).encode("utf8")
26 ).decode("ascii")
27 return scheme + " " + v
28
29
30 def parse_http_basic_auth(s: str) -> Tuple[str, str, str]:
31 """
32 Parse a basic auth header.
33 Raises a ValueError if the input is invalid.
34 """
35 scheme, authinfo = s.split()
36 if scheme.lower() != "basic":
37 raise ValueError("Unknown scheme")
38 try:
39 user, password = binascii.a2b_base64(authinfo.encode()).decode("utf8", "replace").split(":")
40 except binascii.Error as e:
41 raise ValueError(str(e))
42 return scheme, user, password
43
44
45 class ProxyAuth:
46 def __init__(self):
47 self.nonanonymous = False
48 self.htpasswd = None
49 self.singleuser = None
50 self.ldapconn = None
51 self.ldapserver = None
52 self.authenticated = weakref.WeakKeyDictionary() # type: MutableMapping[connections.ClientConnection, Tuple[str, str]]
53 """Contains all connections that are permanently authenticated after an HTTP CONNECT"""
54
55 def enabled(self) -> bool:
56 return any([self.nonanonymous, self.htpasswd, self.singleuser, self.ldapconn, self.ldapserver])
57
58 def is_proxy_auth(self) -> bool:
59 """
60 Returns:
61 - True, if authentication is done as if mitmproxy is a proxy
62 - False, if authentication is done as if mitmproxy is a HTTP server
63 """
64 return ctx.options.mode in ("regular", "upstream")
65
66 def which_auth_header(self) -> str:
67 if self.is_proxy_auth():
68 return 'Proxy-Authorization'
69 else:
70 return 'Authorization'
71
72 def auth_required_response(self) -> http.HTTPResponse:
73 if self.is_proxy_auth():
74 return http.make_error_response(
75 status_codes.PROXY_AUTH_REQUIRED,
76 headers=mitmproxy.net.http.Headers(Proxy_Authenticate='Basic realm="{}"'.format(REALM)),
77 )
78 else:
79 return http.make_error_response(
80 status_codes.UNAUTHORIZED,
81 headers=mitmproxy.net.http.Headers(WWW_Authenticate='Basic realm="{}"'.format(REALM)),
82 )
83
84 def check(self, f: http.HTTPFlow) -> Optional[Tuple[str, str]]:
85 """
86 Check if a request is correctly authenticated.
87 Returns:
88 - a (username, password) tuple if successful,
89 - None, otherwise.
90 """
91 auth_value = f.request.headers.get(self.which_auth_header(), "")
92 try:
93 scheme, username, password = parse_http_basic_auth(auth_value)
94 except ValueError:
95 return None
96
97 if self.nonanonymous:
98 return username, password
99 elif self.singleuser:
100 if self.singleuser == [username, password]:
101 return username, password
102 elif self.htpasswd:
103 if self.htpasswd.check_password(username, password):
104 return username, password
105 elif self.ldapconn:
106 if not username or not password:
107 return None
108 self.ldapconn.search(ctx.options.proxyauth.split(':')[4], '(cn=' + username + ')')
109 if self.ldapconn.response:
110 conn = ldap3.Connection(
111 self.ldapserver,
112 self.ldapconn.response[0]['dn'],
113 password,
114 auto_bind=True)
115 if conn:
116 return username, password
117 return None
118
119 def authenticate(self, f: http.HTTPFlow) -> bool:
120 valid_credentials = self.check(f)
121 if valid_credentials:
122 f.metadata["proxyauth"] = valid_credentials
123 del f.request.headers[self.which_auth_header()]
124 return True
125 else:
126 f.response = self.auth_required_response()
127 return False
128
129 # Handlers
130 def configure(self, updated):
131 if "proxyauth" in updated:
132 self.nonanonymous = False
133 self.singleuser = None
134 self.htpasswd = None
135 self.ldapserver = None
136 if ctx.options.proxyauth:
137 if ctx.options.proxyauth == "any":
138 self.nonanonymous = True
139 elif ctx.options.proxyauth.startswith("@"):
140 p = ctx.options.proxyauth[1:]
141 try:
142 self.htpasswd = passlib.apache.HtpasswdFile(p)
143 except (ValueError, OSError) as v:
144 raise exceptions.OptionsError(
145 "Could not open htpasswd file: %s" % p
146 )
147 elif ctx.options.proxyauth.startswith("ldap"):
148 parts = ctx.options.proxyauth.split(':')
149 security = parts[0]
150 ldap_server = parts[1]
151 dn_baseauth = parts[2]
152 password_baseauth = parts[3]
153 if len(parts) != 5:
154 raise exceptions.OptionsError(
155 "Invalid ldap specification"
156 )
157 if security == "ldaps":
158 server = ldap3.Server(ldap_server, use_ssl=True)
159 elif security == "ldap":
160 server = ldap3.Server(ldap_server)
161 else:
162 raise exceptions.OptionsError(
163 "Invalid ldap specfication on the first part"
164 )
165 conn = ldap3.Connection(
166 server,
167 dn_baseauth,
168 password_baseauth,
169 auto_bind=True)
170 self.ldapconn = conn
171 self.ldapserver = server
172 else:
173 parts = ctx.options.proxyauth.split(':')
174 if len(parts) != 2:
175 raise exceptions.OptionsError(
176 "Invalid single-user auth specification."
177 )
178 self.singleuser = parts
179 if self.enabled():
180 if ctx.options.mode == "transparent":
181 raise exceptions.OptionsError(
182 "Proxy Authentication not supported in transparent mode."
183 )
184 if ctx.options.mode == "socks5":
185 raise exceptions.OptionsError(
186 "Proxy Authentication not supported in SOCKS mode. "
187 "https://github.com/mitmproxy/mitmproxy/issues/738"
188 )
189 # TODO: check for multiple auth options
190
191 def http_connect(self, f: http.HTTPFlow) -> None:
192 if self.enabled():
193 if self.authenticate(f):
194 self.authenticated[f.client_conn] = f.metadata["proxyauth"]
195
196 def requestheaders(self, f: http.HTTPFlow) -> None:
197 if self.enabled():
198 # Is this connection authenticated by a previous HTTP CONNECT?
199 if f.client_conn in self.authenticated:
200 f.metadata["proxyauth"] = self.authenticated[f.client_conn]
201 return
202 self.authenticate(f)
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/proxyauth.py b/mitmproxy/addons/proxyauth.py
--- a/mitmproxy/addons/proxyauth.py
+++ b/mitmproxy/addons/proxyauth.py
@@ -61,7 +61,7 @@
- True, if authentication is done as if mitmproxy is a proxy
- False, if authentication is done as if mitmproxy is a HTTP server
"""
- return ctx.options.mode in ("regular", "upstream")
+ return ctx.options.mode == "regular" or ctx.options.mode.startswith("upstream:")
def which_auth_header(self) -> str:
if self.is_proxy_auth():
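The one-line fix above points at the cause: with `-m upstream:<url>` the mode option holds the full upstream specification string, so the old exact-membership test never classifies the instance as a proxy and the wrong auth header and status code are used. A quick standalone check of the two conditions (plain strings, not mitmproxy objects):
```
mode = "upstream:http://10.7.0.225:8080/"  # value taken from the report's command line

old_check = mode in ("regular", "upstream")                    # False -> WWW-Authenticate / 401 path
new_check = mode == "regular" or mode.startswith("upstream:")  # True  -> Proxy-Authenticate / 407 path

print(old_check, new_check)
```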
| {"golden_diff": "diff --git a/mitmproxy/addons/proxyauth.py b/mitmproxy/addons/proxyauth.py\n--- a/mitmproxy/addons/proxyauth.py\n+++ b/mitmproxy/addons/proxyauth.py\n@@ -61,7 +61,7 @@\n - True, if authentication is done as if mitmproxy is a proxy\n - False, if authentication is done as if mitmproxy is a HTTP server\n \"\"\"\n- return ctx.options.mode in (\"regular\", \"upstream\")\n+ return ctx.options.mode == \"regular\" or ctx.options.mode.startswith(\"upstream:\")\n \n def which_auth_header(self) -> str:\n if self.is_proxy_auth():\n", "issue": "Unable to use upstream proxy and proxy authentification\n##### Steps to reproduce the problem:\r\n\r\n1. git clone https://github.com/mitmproxy/mitmproxy.git\r\n2. cd mitmproxy\r\n3. ./dev.sh \r\n4. . venv/bin/activate\r\n5. mitmdump --proxyauth test:test -m upstream:http://10.7.0.225:8080/ --ssl-insecure\r\n\r\nOn 10.7.0.225, I have a VM that runs mitmproxy :\r\n6. mitmdump --insecure\r\n\r\n##### Any other comments? What have you tried so far?\r\nIt seems that each time you have a clientdisconnect/clientconnect, you get a new prompt to login (proxyauth). Basically for each domain you have to login to the proxyauth, here test/test. \r\nI wild guess would be that the information in the header to pass by the upstreamproxy somehow is not compatible with the header auth information \r\n\r\n##### System information\r\nHOST : \r\n```\r\nMitmproxy version: 3.0.0 (2.0.0dev0435-0x1c6b33f) \r\nPython version: 3.6.0\r\nPlatform: Linux-4.9.0-0.bpo.2-amd64-x86_64-with-debian-8.7\r\nSSL version: OpenSSL 1.1.0e 16 Feb 2017\r\nLinux distro: debian 8.7 \r\n```\r\n\r\nVM (10.7.0.225):\r\n```\r\nMitmproxy version : 2.0.0 (release version)\r\nPython version : 3.5.2\r\nPlatform : Linux-4.4.0-75-generic-x86_64-with-Ubuntu-16.04-xenial\r\nSSL version: OpenSSL 1.0.2g 1 Mar 2016\r\nLinux distro: Ubuntu 16.04 xenial\r\n```\r\n\n", "before_files": [{"content": "import binascii\nimport weakref\nimport ldap3\nfrom typing import Optional\nfrom typing import MutableMapping # noqa\nfrom typing import Tuple\n\nimport passlib.apache\n\nimport mitmproxy.net.http\nfrom mitmproxy import connections # noqa\nfrom mitmproxy import exceptions\nfrom mitmproxy import http\nfrom mitmproxy import ctx\nfrom mitmproxy.net.http import status_codes\n\nREALM = \"mitmproxy\"\n\n\ndef mkauth(username: str, password: str, scheme: str = \"basic\") -> str:\n \"\"\"\n Craft a basic auth string\n \"\"\"\n v = binascii.b2a_base64(\n (username + \":\" + password).encode(\"utf8\")\n ).decode(\"ascii\")\n return scheme + \" \" + v\n\n\ndef parse_http_basic_auth(s: str) -> Tuple[str, str, str]:\n \"\"\"\n Parse a basic auth header.\n Raises a ValueError if the input is invalid.\n \"\"\"\n scheme, authinfo = s.split()\n if scheme.lower() != \"basic\":\n raise ValueError(\"Unknown scheme\")\n try:\n user, password = binascii.a2b_base64(authinfo.encode()).decode(\"utf8\", \"replace\").split(\":\")\n except binascii.Error as e:\n raise ValueError(str(e))\n return scheme, user, password\n\n\nclass ProxyAuth:\n def __init__(self):\n self.nonanonymous = False\n self.htpasswd = None\n self.singleuser = None\n self.ldapconn = None\n self.ldapserver = None\n self.authenticated = weakref.WeakKeyDictionary() # type: MutableMapping[connections.ClientConnection, Tuple[str, str]]\n \"\"\"Contains all connections that are permanently authenticated after an HTTP CONNECT\"\"\"\n\n def enabled(self) -> bool:\n return any([self.nonanonymous, self.htpasswd, self.singleuser, self.ldapconn, self.ldapserver])\n\n def 
is_proxy_auth(self) -> bool:\n \"\"\"\n Returns:\n - True, if authentication is done as if mitmproxy is a proxy\n - False, if authentication is done as if mitmproxy is a HTTP server\n \"\"\"\n return ctx.options.mode in (\"regular\", \"upstream\")\n\n def which_auth_header(self) -> str:\n if self.is_proxy_auth():\n return 'Proxy-Authorization'\n else:\n return 'Authorization'\n\n def auth_required_response(self) -> http.HTTPResponse:\n if self.is_proxy_auth():\n return http.make_error_response(\n status_codes.PROXY_AUTH_REQUIRED,\n headers=mitmproxy.net.http.Headers(Proxy_Authenticate='Basic realm=\"{}\"'.format(REALM)),\n )\n else:\n return http.make_error_response(\n status_codes.UNAUTHORIZED,\n headers=mitmproxy.net.http.Headers(WWW_Authenticate='Basic realm=\"{}\"'.format(REALM)),\n )\n\n def check(self, f: http.HTTPFlow) -> Optional[Tuple[str, str]]:\n \"\"\"\n Check if a request is correctly authenticated.\n Returns:\n - a (username, password) tuple if successful,\n - None, otherwise.\n \"\"\"\n auth_value = f.request.headers.get(self.which_auth_header(), \"\")\n try:\n scheme, username, password = parse_http_basic_auth(auth_value)\n except ValueError:\n return None\n\n if self.nonanonymous:\n return username, password\n elif self.singleuser:\n if self.singleuser == [username, password]:\n return username, password\n elif self.htpasswd:\n if self.htpasswd.check_password(username, password):\n return username, password\n elif self.ldapconn:\n if not username or not password:\n return None\n self.ldapconn.search(ctx.options.proxyauth.split(':')[4], '(cn=' + username + ')')\n if self.ldapconn.response:\n conn = ldap3.Connection(\n self.ldapserver,\n self.ldapconn.response[0]['dn'],\n password,\n auto_bind=True)\n if conn:\n return username, password\n return None\n\n def authenticate(self, f: http.HTTPFlow) -> bool:\n valid_credentials = self.check(f)\n if valid_credentials:\n f.metadata[\"proxyauth\"] = valid_credentials\n del f.request.headers[self.which_auth_header()]\n return True\n else:\n f.response = self.auth_required_response()\n return False\n\n # Handlers\n def configure(self, updated):\n if \"proxyauth\" in updated:\n self.nonanonymous = False\n self.singleuser = None\n self.htpasswd = None\n self.ldapserver = None\n if ctx.options.proxyauth:\n if ctx.options.proxyauth == \"any\":\n self.nonanonymous = True\n elif ctx.options.proxyauth.startswith(\"@\"):\n p = ctx.options.proxyauth[1:]\n try:\n self.htpasswd = passlib.apache.HtpasswdFile(p)\n except (ValueError, OSError) as v:\n raise exceptions.OptionsError(\n \"Could not open htpasswd file: %s\" % p\n )\n elif ctx.options.proxyauth.startswith(\"ldap\"):\n parts = ctx.options.proxyauth.split(':')\n security = parts[0]\n ldap_server = parts[1]\n dn_baseauth = parts[2]\n password_baseauth = parts[3]\n if len(parts) != 5:\n raise exceptions.OptionsError(\n \"Invalid ldap specification\"\n )\n if security == \"ldaps\":\n server = ldap3.Server(ldap_server, use_ssl=True)\n elif security == \"ldap\":\n server = ldap3.Server(ldap_server)\n else:\n raise exceptions.OptionsError(\n \"Invalid ldap specfication on the first part\"\n )\n conn = ldap3.Connection(\n server,\n dn_baseauth,\n password_baseauth,\n auto_bind=True)\n self.ldapconn = conn\n self.ldapserver = server\n else:\n parts = ctx.options.proxyauth.split(':')\n if len(parts) != 2:\n raise exceptions.OptionsError(\n \"Invalid single-user auth specification.\"\n )\n self.singleuser = parts\n if self.enabled():\n if ctx.options.mode == \"transparent\":\n raise 
exceptions.OptionsError(\n \"Proxy Authentication not supported in transparent mode.\"\n )\n if ctx.options.mode == \"socks5\":\n raise exceptions.OptionsError(\n \"Proxy Authentication not supported in SOCKS mode. \"\n \"https://github.com/mitmproxy/mitmproxy/issues/738\"\n )\n # TODO: check for multiple auth options\n\n def http_connect(self, f: http.HTTPFlow) -> None:\n if self.enabled():\n if self.authenticate(f):\n self.authenticated[f.client_conn] = f.metadata[\"proxyauth\"]\n\n def requestheaders(self, f: http.HTTPFlow) -> None:\n if self.enabled():\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n return\n self.authenticate(f)\n", "path": "mitmproxy/addons/proxyauth.py"}], "after_files": [{"content": "import binascii\nimport weakref\nimport ldap3\nfrom typing import Optional\nfrom typing import MutableMapping # noqa\nfrom typing import Tuple\n\nimport passlib.apache\n\nimport mitmproxy.net.http\nfrom mitmproxy import connections # noqa\nfrom mitmproxy import exceptions\nfrom mitmproxy import http\nfrom mitmproxy import ctx\nfrom mitmproxy.net.http import status_codes\n\nREALM = \"mitmproxy\"\n\n\ndef mkauth(username: str, password: str, scheme: str = \"basic\") -> str:\n \"\"\"\n Craft a basic auth string\n \"\"\"\n v = binascii.b2a_base64(\n (username + \":\" + password).encode(\"utf8\")\n ).decode(\"ascii\")\n return scheme + \" \" + v\n\n\ndef parse_http_basic_auth(s: str) -> Tuple[str, str, str]:\n \"\"\"\n Parse a basic auth header.\n Raises a ValueError if the input is invalid.\n \"\"\"\n scheme, authinfo = s.split()\n if scheme.lower() != \"basic\":\n raise ValueError(\"Unknown scheme\")\n try:\n user, password = binascii.a2b_base64(authinfo.encode()).decode(\"utf8\", \"replace\").split(\":\")\n except binascii.Error as e:\n raise ValueError(str(e))\n return scheme, user, password\n\n\nclass ProxyAuth:\n def __init__(self):\n self.nonanonymous = False\n self.htpasswd = None\n self.singleuser = None\n self.ldapconn = None\n self.ldapserver = None\n self.authenticated = weakref.WeakKeyDictionary() # type: MutableMapping[connections.ClientConnection, Tuple[str, str]]\n \"\"\"Contains all connections that are permanently authenticated after an HTTP CONNECT\"\"\"\n\n def enabled(self) -> bool:\n return any([self.nonanonymous, self.htpasswd, self.singleuser, self.ldapconn, self.ldapserver])\n\n def is_proxy_auth(self) -> bool:\n \"\"\"\n Returns:\n - True, if authentication is done as if mitmproxy is a proxy\n - False, if authentication is done as if mitmproxy is a HTTP server\n \"\"\"\n return ctx.options.mode == \"regular\" or ctx.options.mode.startswith(\"upstream:\")\n\n def which_auth_header(self) -> str:\n if self.is_proxy_auth():\n return 'Proxy-Authorization'\n else:\n return 'Authorization'\n\n def auth_required_response(self) -> http.HTTPResponse:\n if self.is_proxy_auth():\n return http.make_error_response(\n status_codes.PROXY_AUTH_REQUIRED,\n headers=mitmproxy.net.http.Headers(Proxy_Authenticate='Basic realm=\"{}\"'.format(REALM)),\n )\n else:\n return http.make_error_response(\n status_codes.UNAUTHORIZED,\n headers=mitmproxy.net.http.Headers(WWW_Authenticate='Basic realm=\"{}\"'.format(REALM)),\n )\n\n def check(self, f: http.HTTPFlow) -> Optional[Tuple[str, str]]:\n \"\"\"\n Check if a request is correctly authenticated.\n Returns:\n - a (username, password) tuple if successful,\n - None, otherwise.\n \"\"\"\n auth_value = 
f.request.headers.get(self.which_auth_header(), \"\")\n try:\n scheme, username, password = parse_http_basic_auth(auth_value)\n except ValueError:\n return None\n\n if self.nonanonymous:\n return username, password\n elif self.singleuser:\n if self.singleuser == [username, password]:\n return username, password\n elif self.htpasswd:\n if self.htpasswd.check_password(username, password):\n return username, password\n elif self.ldapconn:\n if not username or not password:\n return None\n self.ldapconn.search(ctx.options.proxyauth.split(':')[4], '(cn=' + username + ')')\n if self.ldapconn.response:\n conn = ldap3.Connection(\n self.ldapserver,\n self.ldapconn.response[0]['dn'],\n password,\n auto_bind=True)\n if conn:\n return username, password\n return None\n\n def authenticate(self, f: http.HTTPFlow) -> bool:\n valid_credentials = self.check(f)\n if valid_credentials:\n f.metadata[\"proxyauth\"] = valid_credentials\n del f.request.headers[self.which_auth_header()]\n return True\n else:\n f.response = self.auth_required_response()\n return False\n\n # Handlers\n def configure(self, updated):\n if \"proxyauth\" in updated:\n self.nonanonymous = False\n self.singleuser = None\n self.htpasswd = None\n self.ldapserver = None\n if ctx.options.proxyauth:\n if ctx.options.proxyauth == \"any\":\n self.nonanonymous = True\n elif ctx.options.proxyauth.startswith(\"@\"):\n p = ctx.options.proxyauth[1:]\n try:\n self.htpasswd = passlib.apache.HtpasswdFile(p)\n except (ValueError, OSError) as v:\n raise exceptions.OptionsError(\n \"Could not open htpasswd file: %s\" % p\n )\n elif ctx.options.proxyauth.startswith(\"ldap\"):\n parts = ctx.options.proxyauth.split(':')\n security = parts[0]\n ldap_server = parts[1]\n dn_baseauth = parts[2]\n password_baseauth = parts[3]\n if len(parts) != 5:\n raise exceptions.OptionsError(\n \"Invalid ldap specification\"\n )\n if security == \"ldaps\":\n server = ldap3.Server(ldap_server, use_ssl=True)\n elif security == \"ldap\":\n server = ldap3.Server(ldap_server)\n else:\n raise exceptions.OptionsError(\n \"Invalid ldap specfication on the first part\"\n )\n conn = ldap3.Connection(\n server,\n dn_baseauth,\n password_baseauth,\n auto_bind=True)\n self.ldapconn = conn\n self.ldapserver = server\n else:\n parts = ctx.options.proxyauth.split(':')\n if len(parts) != 2:\n raise exceptions.OptionsError(\n \"Invalid single-user auth specification.\"\n )\n self.singleuser = parts\n if self.enabled():\n if ctx.options.mode == \"transparent\":\n raise exceptions.OptionsError(\n \"Proxy Authentication not supported in transparent mode.\"\n )\n if ctx.options.mode == \"socks5\":\n raise exceptions.OptionsError(\n \"Proxy Authentication not supported in SOCKS mode. \"\n \"https://github.com/mitmproxy/mitmproxy/issues/738\"\n )\n # TODO: check for multiple auth options\n\n def http_connect(self, f: http.HTTPFlow) -> None:\n if self.enabled():\n if self.authenticate(f):\n self.authenticated[f.client_conn] = f.metadata[\"proxyauth\"]\n\n def requestheaders(self, f: http.HTTPFlow) -> None:\n if self.enabled():\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n return\n self.authenticate(f)\n", "path": "mitmproxy/addons/proxyauth.py"}]} | 2,706 | 148 |
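The mitmproxy record above boils down to a one-line predicate change in `is_proxy_auth`. The sketch below is a standalone illustration in plain Python, not mitmproxy's actual option handling; the two helper functions are made up for the sketch, and the mode string is the value quoted in the issue report. It shows why the original membership test never matches once the upstream URL is embedded in the mode value.

```python
def is_proxy_auth_old(mode: str) -> bool:
    # original check: only ever True for the literal strings "regular"/"upstream"
    return mode in ("regular", "upstream")

def is_proxy_auth_new(mode: str) -> bool:
    # patched check: upstream modes carry their URL, e.g. "upstream:http://host:8080/"
    return mode == "regular" or mode.startswith("upstream:")

mode = "upstream:http://10.7.0.225:8080/"   # value from the issue report
assert not is_proxy_auth_old(mode)          # old code treats mitmproxy as a plain HTTP server
assert is_proxy_auth_new(mode)              # fix restores Proxy-Authorization handling
```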
gh_patches_debug_42061 | rasdani/github-patches | git_diff | pytorch__text-217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TabularDataset class with header does not match fields by name
Currently, the TabularDataset class matches each feature in a csv/tsv file to its field by requiring the (name, Field) tuples passed to the fields kwarg to be in the same order as the columns in the csv/tsv file.
I think this is counterintuitive, especially if the csv/tsv file has a header. If there is a header, I think that the user should be allowed to specify the fields similar to the json format, where they pass in a dict mapping the column name to the (name, Field) tuple.
Here’s an example of what I’m thinking.
```
$ head -n 2 train.csv
"text","label"
"hello",0
```
```python
>>> pos = data.TabularDataset(
... path='train.csv', format='csv',
... fields={'label': ('label', data.Field()),
... 'text': ('text', data.Field())}
...
```
I think they should also be allowed to select a subset of the columns using this method, similar to the API for the json format (columns not in the keys will be ignored).
This should provide a more consistent and intuitive API. If the maintainers are willing to accept this API, I would be happy to implement it and send a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/example.py`
Content:
```
1 import json
2
3 import six
4
5
6 class Example(object):
7 """Defines a single training or test example.
8
9 Stores each column of the example as an attribute.
10 """
11
12 @classmethod
13 def fromJSON(cls, data, fields):
14 return cls.fromdict(json.loads(data), fields)
15
16 @classmethod
17 def fromdict(cls, data, fields):
18 ex = cls()
19 for key, vals in fields.items():
20 if key not in data:
21 raise ValueError("Specified key {} was not found in "
22 "the input data".format(key))
23 if vals is not None:
24 if not isinstance(vals, list):
25 vals = [vals]
26 for val in vals:
27 name, field = val
28 setattr(ex, name, field.preprocess(data[key]))
29 return ex
30
31 @classmethod
32 def fromTSV(cls, data, fields):
33 return cls.fromlist(data, fields)
34
35 @classmethod
36 def fromCSV(cls, data, fields):
37 return cls.fromlist(data, fields)
38
39 @classmethod
40 def fromlist(cls, data, fields):
41 ex = cls()
42 for (name, field), val in zip(fields, data):
43 if field is not None:
44 if isinstance(val, six.string_types):
45 val = val.rstrip('\n')
46 setattr(ex, name, field.preprocess(val))
47 return ex
48
49 @classmethod
50 def fromtree(cls, data, fields, subtrees=False):
51 try:
52 from nltk.tree import Tree
53 except ImportError:
54 print("Please install NLTK. "
55 "See the docs at http://nltk.org for more information.")
56 raise
57 tree = Tree.fromstring(data)
58 if subtrees:
59 return [cls.fromlist(
60 [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]
61 return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)
62
```
Path: `torchtext/data/dataset.py`
Content:
```
1 import io
2 import os
3 import zipfile
4 import tarfile
5
6 import torch.utils.data
7
8 from .example import Example
9 from ..utils import download_from_url, unicode_csv_reader
10
11
12 class Dataset(torch.utils.data.Dataset):
13 """Defines a dataset composed of Examples along with its Fields.
14
15 Attributes:
16 sort_key (callable): A key to use for sorting dataset examples for batching
17 together examples with similar lengths to minimize padding.
18 examples (list(Example)): The examples in this dataset.
19 fields (dict[str, Field]): Contains the name of each column or field, together
20 with the corresponding Field object. Two fields with the same Field object
21 will have a shared vocabulary.
22 """
23 sort_key = None
24
25 def __init__(self, examples, fields, filter_pred=None):
26 """Create a dataset from a list of Examples and Fields.
27
28 Arguments:
29 examples: List of Examples.
30 fields (List(tuple(str, Field))): The Fields to use in this tuple. The
31 string is a field name, and the Field is the associated field.
32 filter_pred (callable or None): Use only examples for which
33 filter_pred(example) is True, or use all examples if None.
34 Default is None.
35 """
36 if filter_pred is not None:
37 make_list = isinstance(examples, list)
38 examples = filter(filter_pred, examples)
39 if make_list:
40 examples = list(examples)
41 self.examples = examples
42 self.fields = dict(fields)
43
44 @classmethod
45 def splits(cls, path=None, root='.data', train=None, validation=None,
46 test=None, **kwargs):
47 """Create Dataset objects for multiple splits of a dataset.
48
49 Arguments:
50 path (str): Common prefix of the splits' file paths, or None to use
51 the result of cls.download(root).
52 root (str): Root dataset storage directory. Default is '.data'.
53 train (str): Suffix to add to path for the train set, or None for no
54 train set. Default is None.
55 validation (str): Suffix to add to path for the validation set, or None
56 for no validation set. Default is None.
57 test (str): Suffix to add to path for the test set, or None for no test
58 set. Default is None.
59 Remaining keyword arguments: Passed to the constructor of the
60 Dataset (sub)class being used.
61
62 Returns:
63 Tuple[Dataset]: Datasets for train, validation, and
64 test splits in that order, if provided.
65 """
66 if path is None:
67 path = cls.download(root)
68 train_data = None if train is None else cls(
69 os.path.join(path, train), **kwargs)
70 val_data = None if validation is None else cls(
71 os.path.join(path, validation), **kwargs)
72 test_data = None if test is None else cls(
73 os.path.join(path, test), **kwargs)
74 return tuple(d for d in (train_data, val_data, test_data)
75 if d is not None)
76
77 def __getitem__(self, i):
78 return self.examples[i]
79
80 def __len__(self):
81 try:
82 return len(self.examples)
83 except TypeError:
84 return 2**32
85
86 def __iter__(self):
87 for x in self.examples:
88 yield x
89
90 def __getattr__(self, attr):
91 if attr in self.fields:
92 for x in self.examples:
93 yield getattr(x, attr)
94
95 @classmethod
96 def download(cls, root, check=None):
97 """Download and unzip an online archive (.zip, .gz, or .tgz).
98
99 Arguments:
100 root (str): Folder to download data to.
101 check (str or None): Folder whose existence indicates
102 that the dataset has already been downloaded, or
103 None to check the existence of root/{cls.name}.
104
105 Returns:
106 str: Path to extracted dataset.
107 """
108 path = os.path.join(root, cls.name)
109 check = path if check is None else check
110 if not os.path.isdir(check):
111 for url in cls.urls:
112 if isinstance(url, tuple):
113 url, filename = url
114 else:
115 filename = os.path.basename(url)
116 zpath = os.path.join(path, filename)
117 if not os.path.isfile(zpath):
118 if not os.path.exists(os.path.dirname(zpath)):
119 os.makedirs(os.path.dirname(zpath))
120 print('downloading {}'.format(filename))
121 download_from_url(url, zpath)
122 ext = os.path.splitext(filename)[-1]
123 if ext == '.zip':
124 with zipfile.ZipFile(zpath, 'r') as zfile:
125 print('extracting')
126 zfile.extractall(path)
127 elif ext in ['.gz', '.tgz']:
128 with tarfile.open(zpath, 'r:gz') as tar:
129 dirs = [member for member in tar.getmembers()]
130 tar.extractall(path=path, members=dirs)
131 return os.path.join(path, cls.dirname)
132
133
134 class TabularDataset(Dataset):
135 """Defines a Dataset of columns stored in CSV, TSV, or JSON format."""
136
137 def __init__(self, path, format, fields, skip_header=False, **kwargs):
138 """Create a TabularDataset given a path, file format, and field list.
139
140 Arguments:
141 path (str): Path to the data file.
142 format (str): The format of the data file. One of "CSV", "TSV", or
143 "JSON" (case-insensitive).
144 fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]: For CSV and
145 TSV formats, list of tuples of (name, field). The list should be in
146 the same order as the columns in the CSV or TSV file, while tuples of
147 (name, None) represent columns that will be ignored. For JSON format,
148 dictionary whose keys are the JSON keys and whose values are tuples of
149 (name, field). This allows the user to rename columns from their JSON key
150 names and also enables selecting a subset of columns to load
151 (since JSON keys not present in the input dictionary are ignored).
152 skip_header (bool): Whether to skip the first line of the input file.
153 """
154 make_example = {
155 'json': Example.fromJSON, 'dict': Example.fromdict,
156 'tsv': Example.fromTSV, 'csv': Example.fromCSV}[format.lower()]
157
158 with io.open(os.path.expanduser(path), encoding="utf8") as f:
159 if format == 'csv':
160 reader = unicode_csv_reader(f)
161 elif format == 'tsv':
162 reader = unicode_csv_reader(f, delimiter='\t')
163 else:
164 reader = f
165
166 if skip_header:
167 next(reader)
168 examples = [make_example(line, fields) for line in reader]
169
170 if make_example in (Example.fromdict, Example.fromJSON):
171 fields, field_dict = [], fields
172 for field in field_dict.values():
173 if isinstance(field, list):
174 fields.extend(field)
175 else:
176 fields.append(field)
177
178 super(TabularDataset, self).__init__(examples, fields, **kwargs)
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchtext/data/dataset.py b/torchtext/data/dataset.py
--- a/torchtext/data/dataset.py
+++ b/torchtext/data/dataset.py
@@ -2,6 +2,7 @@
import os
import zipfile
import tarfile
+from functools import partial
import torch.utils.data
@@ -141,19 +142,22 @@
path (str): Path to the data file.
format (str): The format of the data file. One of "CSV", "TSV", or
"JSON" (case-insensitive).
- fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]: For CSV and
- TSV formats, list of tuples of (name, field). The list should be in
- the same order as the columns in the CSV or TSV file, while tuples of
- (name, None) represent columns that will be ignored. For JSON format,
- dictionary whose keys are the JSON keys and whose values are tuples of
- (name, field). This allows the user to rename columns from their JSON key
- names and also enables selecting a subset of columns to load
- (since JSON keys not present in the input dictionary are ignored).
+ fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]:
+ If using a list, the format must be CSV or TSV, and the values of the list
+ should be tuples of (name, field).
+ The fields should be in the same order as the columns in the CSV or TSV
+ file, while tuples of (name, None) represent columns that will be ignored.
+
+ If using a dict, the keys should be a subset of the JSON keys or CSV/TSV
+ columns, and the values should be tuples of (name, field).
+ Keys not present in the input dictionary are ignored.
+ This allows the user to rename columns from their JSON/CSV/TSV key names
+ and also enables selecting a subset of columns to load.
skip_header (bool): Whether to skip the first line of the input file.
"""
make_example = {
'json': Example.fromJSON, 'dict': Example.fromdict,
- 'tsv': Example.fromTSV, 'csv': Example.fromCSV}[format.lower()]
+ 'tsv': Example.fromCSV, 'csv': Example.fromCSV}[format.lower()]
with io.open(os.path.expanduser(path), encoding="utf8") as f:
if format == 'csv':
@@ -163,8 +167,18 @@
else:
reader = f
+ if format in ['csv', 'tsv'] and isinstance(fields, dict):
+ if skip_header:
+ raise ValueError('When using a dict to specify fields with a {} file,'
+ 'skip_header must be False and'
+ 'the file must have a header.'.format(format))
+ header = next(reader)
+ field_to_index = {f: header.index(f) for f in fields.keys()}
+ make_example = partial(make_example, field_to_index=field_to_index)
+
if skip_header:
next(reader)
+
examples = [make_example(line, fields) for line in reader]
if make_example in (Example.fromdict, Example.fromJSON):
diff --git a/torchtext/data/example.py b/torchtext/data/example.py
--- a/torchtext/data/example.py
+++ b/torchtext/data/example.py
@@ -29,12 +29,13 @@
return ex
@classmethod
- def fromTSV(cls, data, fields):
- return cls.fromlist(data, fields)
-
- @classmethod
- def fromCSV(cls, data, fields):
- return cls.fromlist(data, fields)
+ def fromCSV(cls, data, fields, field_to_index=None):
+ if field_to_index is None:
+ return cls.fromlist(data, fields)
+ else:
+ assert(isinstance(fields, dict))
+ data_dict = {f: data[idx] for f, idx in field_to_index.items()}
+ return cls.fromdict(data_dict, fields)
@classmethod
def fromlist(cls, data, fields):
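For reference, a usage sketch of the dict-based fields API that this diff introduces. It assumes the patched torchtext is installed and a hypothetical `train.csv` whose header row is `"text","label"`; with a dict, matching is by header name, so the column order in the file no longer matters, and `skip_header` must stay False because the header row is consumed internally to build `field_to_index`.

```python
from torchtext import data

TEXT = data.Field()
LABEL = data.Field(sequential=False)

dataset = data.TabularDataset(
    path='train.csv', format='csv',
    # keys are header names; values are (attribute name, Field) tuples;
    # columns whose header is not listed here are simply ignored
    fields={'text': ('text', TEXT), 'label': ('label', LABEL)},
    skip_header=False,  # must be False here; the header is read internally
)
print(vars(dataset[0]))   # e.g. {'text': ['hello'], 'label': '0'}
```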
| {"golden_diff": "diff --git a/torchtext/data/dataset.py b/torchtext/data/dataset.py\n--- a/torchtext/data/dataset.py\n+++ b/torchtext/data/dataset.py\n@@ -2,6 +2,7 @@\n import os\n import zipfile\n import tarfile\n+from functools import partial\n \n import torch.utils.data\n \n@@ -141,19 +142,22 @@\n path (str): Path to the data file.\n format (str): The format of the data file. One of \"CSV\", \"TSV\", or\n \"JSON\" (case-insensitive).\n- fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]: For CSV and\n- TSV formats, list of tuples of (name, field). The list should be in\n- the same order as the columns in the CSV or TSV file, while tuples of\n- (name, None) represent columns that will be ignored. For JSON format,\n- dictionary whose keys are the JSON keys and whose values are tuples of\n- (name, field). This allows the user to rename columns from their JSON key\n- names and also enables selecting a subset of columns to load\n- (since JSON keys not present in the input dictionary are ignored).\n+ fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]:\n+ If using a list, the format must be CSV or TSV, and the values of the list\n+ should be tuples of (name, field).\n+ The fields should be in the same order as the columns in the CSV or TSV\n+ file, while tuples of (name, None) represent columns that will be ignored.\n+\n+ If using a dict, the keys should be a subset of the JSON keys or CSV/TSV\n+ columns, and the values should be tuples of (name, field).\n+ Keys not present in the input dictionary are ignored.\n+ This allows the user to rename columns from their JSON/CSV/TSV key names\n+ and also enables selecting a subset of columns to load.\n skip_header (bool): Whether to skip the first line of the input file.\n \"\"\"\n make_example = {\n 'json': Example.fromJSON, 'dict': Example.fromdict,\n- 'tsv': Example.fromTSV, 'csv': Example.fromCSV}[format.lower()]\n+ 'tsv': Example.fromCSV, 'csv': Example.fromCSV}[format.lower()]\n \n with io.open(os.path.expanduser(path), encoding=\"utf8\") as f:\n if format == 'csv':\n@@ -163,8 +167,18 @@\n else:\n reader = f\n \n+ if format in ['csv', 'tsv'] and isinstance(fields, dict):\n+ if skip_header:\n+ raise ValueError('When using a dict to specify fields with a {} file,'\n+ 'skip_header must be False and'\n+ 'the file must have a header.'.format(format))\n+ header = next(reader)\n+ field_to_index = {f: header.index(f) for f in fields.keys()}\n+ make_example = partial(make_example, field_to_index=field_to_index)\n+\n if skip_header:\n next(reader)\n+\n examples = [make_example(line, fields) for line in reader]\n \n if make_example in (Example.fromdict, Example.fromJSON):\ndiff --git a/torchtext/data/example.py b/torchtext/data/example.py\n--- a/torchtext/data/example.py\n+++ b/torchtext/data/example.py\n@@ -29,12 +29,13 @@\n return ex\n \n @classmethod\n- def fromTSV(cls, data, fields):\n- return cls.fromlist(data, fields)\n-\n- @classmethod\n- def fromCSV(cls, data, fields):\n- return cls.fromlist(data, fields)\n+ def fromCSV(cls, data, fields, field_to_index=None):\n+ if field_to_index is None:\n+ return cls.fromlist(data, fields)\n+ else:\n+ assert(isinstance(fields, dict))\n+ data_dict = {f: data[idx] for f, idx in field_to_index.items()}\n+ return cls.fromdict(data_dict, fields)\n \n @classmethod\n def fromlist(cls, data, fields):\n", "issue": "TabularDataset class with header does not match fields by name\nCurrently, the TabularDataset class matches each feature in a csv/tsv file to its field by requiring the (name, 
Field) tuples passed to the fields kwarg to be in the same order as the columns csv/tsv file.\r\n\r\nI think this is counterintuitive, especially if the csv/tsv file has a header. If there is a header, I think that the user should be allowed to specify the fields similar to the json format, where they pass in a dict mapping the column name to the (name, Field) tuple.\r\n\r\nHere\u2019s an example of what I\u2019m thinking.\r\n\r\n```\r\n$ head -n 2 train.csv\r\n\"text\",\"label\"\r\n\"hello\",0\r\n```\r\n\r\n```python\r\n>>> pos = data.TabularDataset(\r\n... path='train.tsv', format='csv',\r\n... fields={'labels': ('labels', data.Field()),\r\n... 'text': ('text', data.Field())}\r\n...\r\n```\r\n\r\nI think they should also be allowed to select a subset of the rows using this method, similar to the API for the json format (columns not in the keys will be ignored).\r\n\r\nThis should provide a more consistent and intuitive API. If the maintainers are willing to accept this API, I would be happy to implement it and send a PR.\n", "before_files": [{"content": "import json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n return cls.fromlist(data, fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n return cls.fromlist(data, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n if isinstance(val, six.string_types):\n val = val.rstrip('\\n')\n setattr(ex, name, field.preprocess(val))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. \"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n", "path": "torchtext/data/example.py"}, {"content": "import io\nimport os\nimport zipfile\nimport tarfile\n\nimport torch.utils.data\n\nfrom .example import Example\nfrom ..utils import download_from_url, unicode_csv_reader\n\n\nclass Dataset(torch.utils.data.Dataset):\n \"\"\"Defines a dataset composed of Examples along with its Fields.\n\n Attributes:\n sort_key (callable): A key to use for sorting dataset examples for batching\n together examples with similar lengths to minimize padding.\n examples (list(Example)): The examples in this dataset.\n fields (dict[str, Field]): Contains the name of each column or field, together\n with the corresponding Field object. 
Two fields with the same Field object\n will have a shared vocabulary.\n \"\"\"\n sort_key = None\n\n def __init__(self, examples, fields, filter_pred=None):\n \"\"\"Create a dataset from a list of Examples and Fields.\n\n Arguments:\n examples: List of Examples.\n fields (List(tuple(str, Field))): The Fields to use in this tuple. The\n string is a field name, and the Field is the associated field.\n filter_pred (callable or None): Use only examples for which\n filter_pred(example) is True, or use all examples if None.\n Default is None.\n \"\"\"\n if filter_pred is not None:\n make_list = isinstance(examples, list)\n examples = filter(filter_pred, examples)\n if make_list:\n examples = list(examples)\n self.examples = examples\n self.fields = dict(fields)\n\n @classmethod\n def splits(cls, path=None, root='.data', train=None, validation=None,\n test=None, **kwargs):\n \"\"\"Create Dataset objects for multiple splits of a dataset.\n\n Arguments:\n path (str): Common prefix of the splits' file paths, or None to use\n the result of cls.download(root).\n root (str): Root dataset storage directory. Default is '.data'.\n train (str): Suffix to add to path for the train set, or None for no\n train set. Default is None.\n validation (str): Suffix to add to path for the validation set, or None\n for no validation set. Default is None.\n test (str): Suffix to add to path for the test set, or None for no test\n set. Default is None.\n Remaining keyword arguments: Passed to the constructor of the\n Dataset (sub)class being used.\n\n Returns:\n Tuple[Dataset]: Datasets for train, validation, and\n test splits in that order, if provided.\n \"\"\"\n if path is None:\n path = cls.download(root)\n train_data = None if train is None else cls(\n os.path.join(path, train), **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n def __getitem__(self, i):\n return self.examples[i]\n\n def __len__(self):\n try:\n return len(self.examples)\n except TypeError:\n return 2**32\n\n def __iter__(self):\n for x in self.examples:\n yield x\n\n def __getattr__(self, attr):\n if attr in self.fields:\n for x in self.examples:\n yield getattr(x, attr)\n\n @classmethod\n def download(cls, root, check=None):\n \"\"\"Download and unzip an online archive (.zip, .gz, or .tgz).\n\n Arguments:\n root (str): Folder to download data to.\n check (str or None): Folder whose existence indicates\n that the dataset has already been downloaded, or\n None to check the existence of root/{cls.name}.\n\n Returns:\n str: Path to extracted dataset.\n \"\"\"\n path = os.path.join(root, cls.name)\n check = path if check is None else check\n if not os.path.isdir(check):\n for url in cls.urls:\n if isinstance(url, tuple):\n url, filename = url\n else:\n filename = os.path.basename(url)\n zpath = os.path.join(path, filename)\n if not os.path.isfile(zpath):\n if not os.path.exists(os.path.dirname(zpath)):\n os.makedirs(os.path.dirname(zpath))\n print('downloading {}'.format(filename))\n download_from_url(url, zpath)\n ext = os.path.splitext(filename)[-1]\n if ext == '.zip':\n with zipfile.ZipFile(zpath, 'r') as zfile:\n print('extracting')\n zfile.extractall(path)\n elif ext in ['.gz', '.tgz']:\n with tarfile.open(zpath, 'r:gz') as tar:\n dirs = [member for member in tar.getmembers()]\n tar.extractall(path=path, members=dirs)\n return 
os.path.join(path, cls.dirname)\n\n\nclass TabularDataset(Dataset):\n \"\"\"Defines a Dataset of columns stored in CSV, TSV, or JSON format.\"\"\"\n\n def __init__(self, path, format, fields, skip_header=False, **kwargs):\n \"\"\"Create a TabularDataset given a path, file format, and field list.\n\n Arguments:\n path (str): Path to the data file.\n format (str): The format of the data file. One of \"CSV\", \"TSV\", or\n \"JSON\" (case-insensitive).\n fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]: For CSV and\n TSV formats, list of tuples of (name, field). The list should be in\n the same order as the columns in the CSV or TSV file, while tuples of\n (name, None) represent columns that will be ignored. For JSON format,\n dictionary whose keys are the JSON keys and whose values are tuples of\n (name, field). This allows the user to rename columns from their JSON key\n names and also enables selecting a subset of columns to load\n (since JSON keys not present in the input dictionary are ignored).\n skip_header (bool): Whether to skip the first line of the input file.\n \"\"\"\n make_example = {\n 'json': Example.fromJSON, 'dict': Example.fromdict,\n 'tsv': Example.fromTSV, 'csv': Example.fromCSV}[format.lower()]\n\n with io.open(os.path.expanduser(path), encoding=\"utf8\") as f:\n if format == 'csv':\n reader = unicode_csv_reader(f)\n elif format == 'tsv':\n reader = unicode_csv_reader(f, delimiter='\\t')\n else:\n reader = f\n\n if skip_header:\n next(reader)\n examples = [make_example(line, fields) for line in reader]\n\n if make_example in (Example.fromdict, Example.fromJSON):\n fields, field_dict = [], fields\n for field in field_dict.values():\n if isinstance(field, list):\n fields.extend(field)\n else:\n fields.append(field)\n\n super(TabularDataset, self).__init__(examples, fields, **kwargs)\n", "path": "torchtext/data/dataset.py"}], "after_files": [{"content": "import json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromCSV(cls, data, fields, field_to_index=None):\n if field_to_index is None:\n return cls.fromlist(data, fields)\n else:\n assert(isinstance(fields, dict))\n data_dict = {f: data[idx] for f, idx in field_to_index.items()}\n return cls.fromdict(data_dict, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n if isinstance(val, six.string_types):\n val = val.rstrip('\\n')\n setattr(ex, name, field.preprocess(val))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. 
\"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n", "path": "torchtext/data/example.py"}, {"content": "import io\nimport os\nimport zipfile\nimport tarfile\nfrom functools import partial\n\nimport torch.utils.data\n\nfrom .example import Example\nfrom ..utils import download_from_url, unicode_csv_reader\n\n\nclass Dataset(torch.utils.data.Dataset):\n \"\"\"Defines a dataset composed of Examples along with its Fields.\n\n Attributes:\n sort_key (callable): A key to use for sorting dataset examples for batching\n together examples with similar lengths to minimize padding.\n examples (list(Example)): The examples in this dataset.\n fields (dict[str, Field]): Contains the name of each column or field, together\n with the corresponding Field object. Two fields with the same Field object\n will have a shared vocabulary.\n \"\"\"\n sort_key = None\n\n def __init__(self, examples, fields, filter_pred=None):\n \"\"\"Create a dataset from a list of Examples and Fields.\n\n Arguments:\n examples: List of Examples.\n fields (List(tuple(str, Field))): The Fields to use in this tuple. The\n string is a field name, and the Field is the associated field.\n filter_pred (callable or None): Use only examples for which\n filter_pred(example) is True, or use all examples if None.\n Default is None.\n \"\"\"\n if filter_pred is not None:\n make_list = isinstance(examples, list)\n examples = filter(filter_pred, examples)\n if make_list:\n examples = list(examples)\n self.examples = examples\n self.fields = dict(fields)\n\n @classmethod\n def splits(cls, path=None, root='.data', train=None, validation=None,\n test=None, **kwargs):\n \"\"\"Create Dataset objects for multiple splits of a dataset.\n\n Arguments:\n path (str): Common prefix of the splits' file paths, or None to use\n the result of cls.download(root).\n root (str): Root dataset storage directory. Default is '.data'.\n train (str): Suffix to add to path for the train set, or None for no\n train set. Default is None.\n validation (str): Suffix to add to path for the validation set, or None\n for no validation set. Default is None.\n test (str): Suffix to add to path for the test set, or None for no test\n set. 
Default is None.\n Remaining keyword arguments: Passed to the constructor of the\n Dataset (sub)class being used.\n\n Returns:\n Tuple[Dataset]: Datasets for train, validation, and\n test splits in that order, if provided.\n \"\"\"\n if path is None:\n path = cls.download(root)\n train_data = None if train is None else cls(\n os.path.join(path, train), **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n def __getitem__(self, i):\n return self.examples[i]\n\n def __len__(self):\n try:\n return len(self.examples)\n except TypeError:\n return 2**32\n\n def __iter__(self):\n for x in self.examples:\n yield x\n\n def __getattr__(self, attr):\n if attr in self.fields:\n for x in self.examples:\n yield getattr(x, attr)\n\n @classmethod\n def download(cls, root, check=None):\n \"\"\"Download and unzip an online archive (.zip, .gz, or .tgz).\n\n Arguments:\n root (str): Folder to download data to.\n check (str or None): Folder whose existence indicates\n that the dataset has already been downloaded, or\n None to check the existence of root/{cls.name}.\n\n Returns:\n str: Path to extracted dataset.\n \"\"\"\n path = os.path.join(root, cls.name)\n check = path if check is None else check\n if not os.path.isdir(check):\n for url in cls.urls:\n if isinstance(url, tuple):\n url, filename = url\n else:\n filename = os.path.basename(url)\n zpath = os.path.join(path, filename)\n if not os.path.isfile(zpath):\n if not os.path.exists(os.path.dirname(zpath)):\n os.makedirs(os.path.dirname(zpath))\n print('downloading {}'.format(filename))\n download_from_url(url, zpath)\n ext = os.path.splitext(filename)[-1]\n if ext == '.zip':\n with zipfile.ZipFile(zpath, 'r') as zfile:\n print('extracting')\n zfile.extractall(path)\n elif ext in ['.gz', '.tgz']:\n with tarfile.open(zpath, 'r:gz') as tar:\n dirs = [member for member in tar.getmembers()]\n tar.extractall(path=path, members=dirs)\n return os.path.join(path, cls.dirname)\n\n\nclass TabularDataset(Dataset):\n \"\"\"Defines a Dataset of columns stored in CSV, TSV, or JSON format.\"\"\"\n\n def __init__(self, path, format, fields, skip_header=False, **kwargs):\n \"\"\"Create a TabularDataset given a path, file format, and field list.\n\n Arguments:\n path (str): Path to the data file.\n format (str): The format of the data file. 
One of \"CSV\", \"TSV\", or\n \"JSON\" (case-insensitive).\n fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]:\n If using a list, the format must be CSV or TSV, and the values of the list\n should be tuples of (name, field).\n The fields should be in the same order as the columns in the CSV or TSV\n file, while tuples of (name, None) represent columns that will be ignored.\n\n If using a dict, the keys should be a subset of the JSON keys or CSV/TSV\n columns, and the values should be tuples of (name, field).\n Keys not present in the input dictionary are ignored.\n This allows the user to rename columns from their JSON/CSV/TSV key names\n and also enables selecting a subset of columns to load.\n skip_header (bool): Whether to skip the first line of the input file.\n \"\"\"\n make_example = {\n 'json': Example.fromJSON, 'dict': Example.fromdict,\n 'tsv': Example.fromCSV, 'csv': Example.fromCSV}[format.lower()]\n\n with io.open(os.path.expanduser(path), encoding=\"utf8\") as f:\n if format == 'csv':\n reader = unicode_csv_reader(f)\n elif format == 'tsv':\n reader = unicode_csv_reader(f, delimiter='\\t')\n else:\n reader = f\n\n if format in ['csv', 'tsv'] and isinstance(fields, dict):\n if skip_header:\n raise ValueError('When using a dict to specify fields with a {} file,'\n 'skip_header must be False and'\n 'the file must have a header.'.format(format))\n header = next(reader)\n field_to_index = {f: header.index(f) for f in fields.keys()}\n make_example = partial(make_example, field_to_index=field_to_index)\n\n if skip_header:\n next(reader)\n\n examples = [make_example(line, fields) for line in reader]\n\n if make_example in (Example.fromdict, Example.fromJSON):\n fields, field_dict = [], fields\n for field in field_dict.values():\n if isinstance(field, list):\n fields.extend(field)\n else:\n fields.append(field)\n\n super(TabularDataset, self).__init__(examples, fields, **kwargs)\n", "path": "torchtext/data/dataset.py"}]} | 3,041 | 934 |
gh_patches_debug_15810 | rasdani/github-patches | git_diff | zulip__zulip-9272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the "Delete streams" administrative tab
Now that we have a "Delete stream" button in the main streams UI, which has lots more context on description/traffic/subscribers, this page is useless. We should just remove it.
We should make sure to remove the documentation in /help/ linking to this as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/bugdown/help_settings_links.py`
Content:
```
1 import re
2 import markdown
3 from typing import Any, Dict, List, Optional, Union, Text
4 from typing.re import Match
5 from markdown.preprocessors import Preprocessor
6
7 REGEXP = re.compile(r'\{settings_tab\|(?P<setting_identifier>.*?)\}')
8
9 link_mapping = {
10 # a mapping from the setting identifier that is the same as the final URL
11 # breadcrumb to that setting to the name of its setting type, the setting
12 # name as it appears in the user interface, and a relative link that can
13 # be used to get to that setting
14 'your-account': ['Settings', 'Your account', '/#settings/your-account'],
15 'display-settings': ['Settings', 'Display settings', '/#settings/display-settings'],
16 'notifications': ['Settings', 'Notifications', '/#settings/notifications'],
17 'your-bots': ['Settings', 'Your bots', '/#settings/your-bots'],
18 'alert-words': ['Settings', 'Alert words', '/#settings/alert-words'],
19 'uploaded-files': ['Settings', 'Uploaded files', '/#settings/uploaded-files'],
20 'muted-topics': ['Settings', 'Muted topics', '/#settings/muted-topics'],
21
22 'organization-profile': ['Manage organization', 'Organization profile',
23 '/#organization/organization-profile'],
24 'organization-settings': ['Manage organization', 'Organization settings',
25 '/#organization/organization-settings'],
26 'organization-permissions': ['Manage organization', 'Organization permissions',
27 '/#organization/organization-permissions'],
28 'emoji-settings': ['Manage organization', 'Custom emoji',
29 '/#organization/emoji-settings'],
30 'auth-methods': ['Manage organization', 'Authentication methods',
31 '/#organization/auth-methods'],
32 'user-groups-admin': ['Manage organization', 'User groups',
33 '/#organization/user-groups-admin'],
34 'user-list-admin': ['Manage organization', 'Users', '/#organization/user-list-admin'],
35 'deactivated-users-admin': ['Manage organization', 'Deactivated users',
36 '/#organization/deactivated-users-admin'],
37 'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],
38 'streams-list-admin': ['Manage organization', 'Delete streams',
39 '/#organization/streams-list-admin'],
40 'default-streams-list': ['Manage organization', 'Default streams',
41 '/#organization/default-streams-list'],
42 'filter-settings': ['Manage organization', 'Filter settings',
43 '/#organization/filter-settings'],
44 'profile-field-settings': ['Manage organization', 'Custom profile fields',
45 '/#organization/profile-field-settings'],
46 'invites-list-admin': ['Manage organization', 'Invitations',
47 '/#organization/invites-list-admin'],
48 }
49
50 settings_markdown = """
51 1. From your desktop, click on the **gear**
52 (<i class="icon-vector-cog"></i>) in the upper right corner.
53
54 1. Select **%(setting_type_name)s**.
55
56 1. On the left, click %(setting_reference)s.
57 """
58
59
60 class SettingHelpExtension(markdown.Extension):
61 def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
62 """ Add SettingHelpExtension to the Markdown instance. """
63 md.registerExtension(self)
64 md.preprocessors.add('setting', Setting(), '_begin')
65
66 relative_settings_links = None # type: Optional[bool]
67
68 def set_relative_settings_links(value: bool) -> None:
69 global relative_settings_links
70 relative_settings_links = value
71
72 class Setting(Preprocessor):
73 def run(self, lines: List[str]) -> List[str]:
74 done = False
75 while not done:
76 for line in lines:
77 loc = lines.index(line)
78 match = REGEXP.search(line)
79
80 if match:
81 text = [self.handleMatch(match)]
82 # The line that contains the directive to include the macro
83 # may be preceded or followed by text or tags, in that case
84 # we need to make sure that any preceding or following text
85 # stays the same.
86 line_split = REGEXP.split(line, maxsplit=0)
87 preceding = line_split[0]
88 following = line_split[-1]
89 text = [preceding] + text + [following]
90 lines = lines[:loc] + text + lines[loc+1:]
91 break
92 else:
93 done = True
94 return lines
95
96 def handleMatch(self, match: Match[Text]) -> Text:
97 setting_identifier = match.group('setting_identifier')
98 setting_type_name = link_mapping[setting_identifier][0]
99 setting_name = link_mapping[setting_identifier][1]
100 setting_link = link_mapping[setting_identifier][2]
101 if relative_settings_links:
102 setting_reference = "[%s](%s)" % (setting_name, setting_link)
103 else:
104 setting_reference = "**%s**" % (setting_name,)
105 instructions = settings_markdown % {'setting_type_name': setting_type_name,
106 'setting_reference': setting_reference}
107 return instructions
108
109
110 def makeExtension(*args: Any, **kwargs: Any) -> SettingHelpExtension:
111 return SettingHelpExtension(*args, **kwargs)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/bugdown/help_settings_links.py b/zerver/lib/bugdown/help_settings_links.py
--- a/zerver/lib/bugdown/help_settings_links.py
+++ b/zerver/lib/bugdown/help_settings_links.py
@@ -35,8 +35,6 @@
'deactivated-users-admin': ['Manage organization', 'Deactivated users',
'/#organization/deactivated-users-admin'],
'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],
- 'streams-list-admin': ['Manage organization', 'Delete streams',
- '/#organization/streams-list-admin'],
'default-streams-list': ['Manage organization', 'Default streams',
'/#organization/default-streams-list'],
'filter-settings': ['Manage organization', 'Filter settings',
| {"golden_diff": "diff --git a/zerver/lib/bugdown/help_settings_links.py b/zerver/lib/bugdown/help_settings_links.py\n--- a/zerver/lib/bugdown/help_settings_links.py\n+++ b/zerver/lib/bugdown/help_settings_links.py\n@@ -35,8 +35,6 @@\n 'deactivated-users-admin': ['Manage organization', 'Deactivated users',\n '/#organization/deactivated-users-admin'],\n 'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],\n- 'streams-list-admin': ['Manage organization', 'Delete streams',\n- '/#organization/streams-list-admin'],\n 'default-streams-list': ['Manage organization', 'Default streams',\n '/#organization/default-streams-list'],\n 'filter-settings': ['Manage organization', 'Filter settings',\n", "issue": "Remove the \"Delete streams\" administrative tab\nNow that we have a \"Delete stream\" button in the main streams UI, which has lots more context on description/traffic/subscribers, this page is useless. We should just remove it.\r\n\r\nWe should make sure to remove the documentation in /help/ linking to this as well.\n", "before_files": [{"content": "import re\nimport markdown\nfrom typing import Any, Dict, List, Optional, Union, Text\nfrom typing.re import Match\nfrom markdown.preprocessors import Preprocessor\n\nREGEXP = re.compile(r'\\{settings_tab\\|(?P<setting_identifier>.*?)\\}')\n\nlink_mapping = {\n # a mapping from the setting identifier that is the same as the final URL\n # breadcrumb to that setting to the name of its setting type, the setting\n # name as it appears in the user interface, and a relative link that can\n # be used to get to that setting\n 'your-account': ['Settings', 'Your account', '/#settings/your-account'],\n 'display-settings': ['Settings', 'Display settings', '/#settings/display-settings'],\n 'notifications': ['Settings', 'Notifications', '/#settings/notifications'],\n 'your-bots': ['Settings', 'Your bots', '/#settings/your-bots'],\n 'alert-words': ['Settings', 'Alert words', '/#settings/alert-words'],\n 'uploaded-files': ['Settings', 'Uploaded files', '/#settings/uploaded-files'],\n 'muted-topics': ['Settings', 'Muted topics', '/#settings/muted-topics'],\n\n 'organization-profile': ['Manage organization', 'Organization profile',\n '/#organization/organization-profile'],\n 'organization-settings': ['Manage organization', 'Organization settings',\n '/#organization/organization-settings'],\n 'organization-permissions': ['Manage organization', 'Organization permissions',\n '/#organization/organization-permissions'],\n 'emoji-settings': ['Manage organization', 'Custom emoji',\n '/#organization/emoji-settings'],\n 'auth-methods': ['Manage organization', 'Authentication methods',\n '/#organization/auth-methods'],\n 'user-groups-admin': ['Manage organization', 'User groups',\n '/#organization/user-groups-admin'],\n 'user-list-admin': ['Manage organization', 'Users', '/#organization/user-list-admin'],\n 'deactivated-users-admin': ['Manage organization', 'Deactivated users',\n '/#organization/deactivated-users-admin'],\n 'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],\n 'streams-list-admin': ['Manage organization', 'Delete streams',\n '/#organization/streams-list-admin'],\n 'default-streams-list': ['Manage organization', 'Default streams',\n '/#organization/default-streams-list'],\n 'filter-settings': ['Manage organization', 'Filter settings',\n '/#organization/filter-settings'],\n 'profile-field-settings': ['Manage organization', 'Custom profile fields',\n '/#organization/profile-field-settings'],\n 
'invites-list-admin': ['Manage organization', 'Invitations',\n '/#organization/invites-list-admin'],\n}\n\nsettings_markdown = \"\"\"\n1. From your desktop, click on the **gear**\n (<i class=\"icon-vector-cog\"></i>) in the upper right corner.\n\n1. Select **%(setting_type_name)s**.\n\n1. On the left, click %(setting_reference)s.\n\"\"\"\n\n\nclass SettingHelpExtension(markdown.Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n \"\"\" Add SettingHelpExtension to the Markdown instance. \"\"\"\n md.registerExtension(self)\n md.preprocessors.add('setting', Setting(), '_begin')\n\nrelative_settings_links = None # type: Optional[bool]\n\ndef set_relative_settings_links(value: bool) -> None:\n global relative_settings_links\n relative_settings_links = value\n\nclass Setting(Preprocessor):\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if match:\n text = [self.handleMatch(match)]\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def handleMatch(self, match: Match[Text]) -> Text:\n setting_identifier = match.group('setting_identifier')\n setting_type_name = link_mapping[setting_identifier][0]\n setting_name = link_mapping[setting_identifier][1]\n setting_link = link_mapping[setting_identifier][2]\n if relative_settings_links:\n setting_reference = \"[%s](%s)\" % (setting_name, setting_link)\n else:\n setting_reference = \"**%s**\" % (setting_name,)\n instructions = settings_markdown % {'setting_type_name': setting_type_name,\n 'setting_reference': setting_reference}\n return instructions\n\n\ndef makeExtension(*args: Any, **kwargs: Any) -> SettingHelpExtension:\n return SettingHelpExtension(*args, **kwargs)\n", "path": "zerver/lib/bugdown/help_settings_links.py"}], "after_files": [{"content": "import re\nimport markdown\nfrom typing import Any, Dict, List, Optional, Union, Text\nfrom typing.re import Match\nfrom markdown.preprocessors import Preprocessor\n\nREGEXP = re.compile(r'\\{settings_tab\\|(?P<setting_identifier>.*?)\\}')\n\nlink_mapping = {\n # a mapping from the setting identifier that is the same as the final URL\n # breadcrumb to that setting to the name of its setting type, the setting\n # name as it appears in the user interface, and a relative link that can\n # be used to get to that setting\n 'your-account': ['Settings', 'Your account', '/#settings/your-account'],\n 'display-settings': ['Settings', 'Display settings', '/#settings/display-settings'],\n 'notifications': ['Settings', 'Notifications', '/#settings/notifications'],\n 'your-bots': ['Settings', 'Your bots', '/#settings/your-bots'],\n 'alert-words': ['Settings', 'Alert words', '/#settings/alert-words'],\n 'uploaded-files': ['Settings', 'Uploaded files', '/#settings/uploaded-files'],\n 'muted-topics': ['Settings', 'Muted topics', '/#settings/muted-topics'],\n\n 'organization-profile': ['Manage organization', 'Organization profile',\n '/#organization/organization-profile'],\n 'organization-settings': ['Manage organization', 'Organization settings',\n 
'/#organization/organization-settings'],\n 'organization-permissions': ['Manage organization', 'Organization permissions',\n '/#organization/organization-permissions'],\n 'emoji-settings': ['Manage organization', 'Custom emoji',\n '/#organization/emoji-settings'],\n 'auth-methods': ['Manage organization', 'Authentication methods',\n '/#organization/auth-methods'],\n 'user-groups-admin': ['Manage organization', 'User groups',\n '/#organization/user-groups-admin'],\n 'user-list-admin': ['Manage organization', 'Users', '/#organization/user-list-admin'],\n 'deactivated-users-admin': ['Manage organization', 'Deactivated users',\n '/#organization/deactivated-users-admin'],\n 'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],\n 'default-streams-list': ['Manage organization', 'Default streams',\n '/#organization/default-streams-list'],\n 'filter-settings': ['Manage organization', 'Filter settings',\n '/#organization/filter-settings'],\n 'profile-field-settings': ['Manage organization', 'Custom profile fields',\n '/#organization/profile-field-settings'],\n 'invites-list-admin': ['Manage organization', 'Invitations',\n '/#organization/invites-list-admin'],\n}\n\nsettings_markdown = \"\"\"\n1. From your desktop, click on the **gear**\n (<i class=\"icon-vector-cog\"></i>) in the upper right corner.\n\n1. Select **%(setting_type_name)s**.\n\n1. On the left, click %(setting_reference)s.\n\"\"\"\n\n\nclass SettingHelpExtension(markdown.Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n \"\"\" Add SettingHelpExtension to the Markdown instance. \"\"\"\n md.registerExtension(self)\n md.preprocessors.add('setting', Setting(), '_begin')\n\nrelative_settings_links = None # type: Optional[bool]\n\ndef set_relative_settings_links(value: bool) -> None:\n global relative_settings_links\n relative_settings_links = value\n\nclass Setting(Preprocessor):\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if match:\n text = [self.handleMatch(match)]\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def handleMatch(self, match: Match[Text]) -> Text:\n setting_identifier = match.group('setting_identifier')\n setting_type_name = link_mapping[setting_identifier][0]\n setting_name = link_mapping[setting_identifier][1]\n setting_link = link_mapping[setting_identifier][2]\n if relative_settings_links:\n setting_reference = \"[%s](%s)\" % (setting_name, setting_link)\n else:\n setting_reference = \"**%s**\" % (setting_name,)\n instructions = settings_markdown % {'setting_type_name': setting_type_name,\n 'setting_reference': setting_reference}\n return instructions\n\n\ndef makeExtension(*args: Any, **kwargs: Any) -> SettingHelpExtension:\n return SettingHelpExtension(*args, **kwargs)\n", "path": "zerver/lib/bugdown/help_settings_links.py"}]} | 1,653 | 173 |
gh_patches_debug_4644 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Online API documentation has gone missing
### Expected behaviour
I should be able to find API documentation online.
### Actual behaviour

### More info
The latest release of Datacube both added some new dependencies, and bumped the minimum version of existing ones. This has caused the [Read the Docs builds](https://readthedocs.org/projects/datacube-core/builds/19401442/) to start quietly failing.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4
5 tests_require = [
6 'hypothesis',
7 'pycodestyle',
8 'pylint',
9 'pytest',
10 'pytest-cov',
11 'pytest-timeout',
12 'pytest-httpserver',
13 'moto',
14 ]
15 doc_require = [
16 'Sphinx',
17 'sphinx_rtd_theme',
18 'sphinx_autodoc_typehints', # Propagate mypy info into docs
19 'sphinx-click',
20 'recommonmark',
21 'setuptools', # version related dependencies
22 'setuptools_scm[toml]',
23 ]
24
25 extras_require = {
26 'performance': ['ciso8601', 'bottleneck'],
27 'distributed': ['distributed', 'dask[distributed]'],
28 'doc': doc_require,
29 's3': ['boto3', 'botocore'],
30 'test': tests_require,
31 'cf': ['compliance-checker>=4.0.0'],
32 }
33
34 extras_require['dev'] = sorted(set(sum([extras_require[k] for k in [
35 'test',
36 'doc',
37 'performance',
38 's3',
39 'distributed',
40 ]], [])))
41
42 # An 'all' option, following ipython naming conventions.
43 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
44
45 extra_plugins = dict(read=[], write=[], index=[])
46
47 setup(
48 name='datacube',
49 python_requires='>=3.8.0',
50
51 url='https://github.com/opendatacube/datacube-core',
52 author='Open Data Cube',
53 maintainer='Open Data Cube',
54 maintainer_email='',
55 description='An analysis environment for satellite and other earth observation data',
56 long_description=open('README.rst').read(),
57 long_description_content_type='text/x-rst',
58 license='Apache License 2.0',
59 classifiers=[
60 "Development Status :: 4 - Beta",
61 "Intended Audience :: Developers",
62 "Intended Audience :: Science/Research",
63 "License :: OSI Approved :: Apache Software License",
64 "Natural Language :: English",
65 "Operating System :: MacOS :: MacOS X",
66 "Operating System :: POSIX",
67 "Operating System :: POSIX :: BSD",
68 "Operating System :: POSIX :: Linux",
69 "Operating System :: Microsoft :: Windows",
70 "Programming Language :: Python",
71 "Programming Language :: Python :: 3",
72 "Programming Language :: Python :: 3.8",
73 "Programming Language :: Python :: 3.9",
74 "Programming Language :: Python :: 3.10",
75 "Topic :: Scientific/Engineering :: GIS",
76 "Topic :: Scientific/Engineering :: Information Analysis",
77 ],
78
79 packages=find_packages(
80 exclude=('tests', 'tests.*',
81 'integration_tests', 'integration_tests.*')
82 ),
83 package_data={
84 '': ['*.yaml', '*/*.yaml'],
85 'datacube': ['py.typed'],
86 },
87 scripts=[],
88 install_requires=[
89 'affine',
90 'attrs>=18.1',
91 'pyproj>=2.5',
92 'shapely>=2.0',
93 'cachetools',
94 'click>=5.0',
95 'cloudpickle>=0.4',
96 'dask[array]',
97 'distributed',
98 'jsonschema',
99 'netcdf4',
100 'numpy',
101 'psycopg2',
102 'lark',
103 'pandas',
104 'python-dateutil',
105 'pyyaml',
106 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1
107 'ruamel.yaml',
108 'sqlalchemy>=1.4,<2.0', # GeoAlchemy2 requires >=1.4 postgres driver implementation is not 2.0 compatible.
109 'GeoAlchemy2',
110 'toolz',
111 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost
112 'packaging',
113 ],
114 extras_require=extras_require,
115 tests_require=tests_require,
116
117 entry_points={
118 'console_scripts': [
119 'datacube = datacube.scripts.cli_app:cli',
120 'datacube-search = datacube.scripts.search_tool:cli',
121 'datacube-worker = datacube.execution.worker:main',
122 ],
123 'datacube.plugins.io.read': [
124 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',
125 *extra_plugins['read'],
126 ],
127 'datacube.plugins.io.write': [
128 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',
129 *extra_plugins['write'],
130 ],
131 'datacube.plugins.index': [
132 'default = datacube.index.postgres.index:index_driver_init',
133 'null = datacube.index.null.index:index_driver_init',
134 'memory = datacube.index.memory.index:index_driver_init',
135 'postgis = datacube.index.postgis.index:index_driver_init',
136 *extra_plugins['index'],
137 ],
138 },
139 )
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,12 +14,13 @@
]
doc_require = [
'Sphinx',
- 'sphinx_rtd_theme',
'sphinx_autodoc_typehints', # Propagate mypy info into docs
'sphinx-click',
'recommonmark',
- 'setuptools', # version related dependencies
- 'setuptools_scm[toml]',
+ 'autodocsumm',
+ 'beautifulsoup4',
+ 'nbsphinx',
+ 'pydata-sphinx-theme==0.9.0',
]
extras_require = {
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,12 +14,13 @@\n ]\n doc_require = [\n 'Sphinx',\n- 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n- 'setuptools', # version related dependencies\n- 'setuptools_scm[toml]',\n+ 'autodocsumm',\n+ 'beautifulsoup4',\n+ 'nbsphinx',\n+ 'pydata-sphinx-theme==0.9.0',\n ]\n \n extras_require = {\n", "issue": "Online API documentation has gone missing\n### Expected behaviour\r\nI should be able to find API documentation online.\r\n\r\n### Actual behaviour\r\n\r\n\r\n### More info\r\n\r\nThe latest release of Datacube both added some new dependencies, and bumped the minimum version of existing ones. This has caused the [Read the Docs builds](https://readthedocs.org/projects/datacube-core/builds/19401442/) to start quietly failing.\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'attrs>=18.1',\n 'pyproj>=2.5',\n 'shapely>=2.0',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 
'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'ruamel.yaml',\n 'sqlalchemy>=1.4,<2.0', # GeoAlchemy2 requires >=1.4 postgres driver implementation is not 2.0 compatible.\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n 'packaging',\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'autodocsumm',\n 'beautifulsoup4',\n 'nbsphinx',\n 'pydata-sphinx-theme==0.9.0',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n 
package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'attrs>=18.1',\n 'pyproj>=2.5',\n 'shapely>=2.0',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'ruamel.yaml',\n 'sqlalchemy>=1.4,<2.0', # GeoAlchemy2 requires >=1.4 postgres driver implementation is not 2.0 compatible.\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n 'packaging',\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]} | 1,857 | 151 |
gh_patches_debug_25523 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2648 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feat: add support for terraform AKS role_based_access_control_enabled
**Describe the issue**
`CKV_AZURE_5`
Recently, azurerm [2.99.0](https://github.com/hashicorp/terraform-provider-azurerm/releases/tag/v2.99.0) was published, which, in preparation for 3.0, brought some syntax changes, one of which is listed below:
***
"Data Source: `azurerm_kubernetes_cluster` - deprecated the `role_based_access_control` block in favour of `azure_active_directory_role_based_access_control` and `role_based_access_control_enabled` properties (https://github.com/hashicorp/terraform-provider-azurerm/issues/15584)"
***
**Examples**
terraform:
```hcl
role_based_access_control_enabled: true
```
This code should pass `CKV_AZURE_5`.
**Version (please complete the following information):**
- Checkov Version 2.0.970
**Additional context**
PR that adds the required support: https://github.com/bridgecrewio/checkov/pull/2648
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/AKSRbacEnabled.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class AKSRbacEnabled(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure RBAC is enabled on AKS clusters"
8 id = "CKV_AZURE_5"
9 supported_resources = ['azurerm_kubernetes_cluster']
10 categories = [CheckCategories.KUBERNETES]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'role_based_access_control/[0]/enabled'
15
16
17 check = AKSRbacEnabled()
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
--- a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
+++ b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
@@ -1,17 +1,32 @@
-from checkov.common.models.enums import CheckCategories
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+import dpath.util
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
-class AKSRbacEnabled(BaseResourceValueCheck):
+class AKSRbacEnabled(BaseResourceCheck):
def __init__(self):
name = "Ensure RBAC is enabled on AKS clusters"
id = "CKV_AZURE_5"
- supported_resources = ['azurerm_kubernetes_cluster']
+ supported_resources = ["azurerm_kubernetes_cluster"]
categories = [CheckCategories.KUBERNETES]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+ super().__init__(
+ name=name,
+ id=id,
+ categories=categories,
+ supported_resources=supported_resources,
+ )
- def get_inspected_key(self):
- return 'role_based_access_control/[0]/enabled'
+ def scan_resource_conf(self, conf):
+ self.evaluated_keys = [
+ "role_based_access_control/[0]/enabled", # azurerm < 2.99.0
+ "role_based_access_control_enabled", # azurerm >= 2.99.0
+ ]
+
+ for key in self.evaluated_keys:
+ if dpath.search(conf, key) and dpath.get(conf, key)[0]:
+ return CheckResult.PASSED
+
+ return CheckResult.FAILED
check = AKSRbacEnabled()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n--- a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n+++ b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n@@ -1,17 +1,32 @@\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+import dpath.util\n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n \n-class AKSRbacEnabled(BaseResourceValueCheck):\n+class AKSRbacEnabled(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure RBAC is enabled on AKS clusters\"\n id = \"CKV_AZURE_5\"\n- supported_resources = ['azurerm_kubernetes_cluster']\n+ supported_resources = [\"azurerm_kubernetes_cluster\"]\n categories = [CheckCategories.KUBERNETES]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n+ super().__init__(\n+ name=name,\n+ id=id,\n+ categories=categories,\n+ supported_resources=supported_resources,\n+ )\n \n- def get_inspected_key(self):\n- return 'role_based_access_control/[0]/enabled'\n+ def scan_resource_conf(self, conf):\n+ self.evaluated_keys = [\n+ \"role_based_access_control/[0]/enabled\", # azurerm < 2.99.0\n+ \"role_based_access_control_enabled\", # azurerm >= 2.99.0\n+ ]\n+\n+ for key in self.evaluated_keys:\n+ if dpath.search(conf, key) and dpath.get(conf, key)[0]:\n+ return CheckResult.PASSED\n+\n+ return CheckResult.FAILED\n \n \n check = AKSRbacEnabled()\n", "issue": "feat: add support for terraform AKS role_based_access_control_enabled\n**Describe the issue**\r\n`CKV_AZURE_5`\r\nRecently, azurerm [2.99.0](https://github.com/hashicorp/terraform-provider-azurerm/releases/tag/v2.99.0) was published, which, in preparation for 3.0, brought some syntax changes, one of which is listed below:\r\n***\r\n\"Data Source: `azurerm_kubernetes_cluster` - deprecated the `role_based_access_control` block in favour of `azure_active_directory_role_based_access_control` and `role_based_access_control_enabled` properties (https://github.com/hashicorp/terraform-provider-azurerm/issues/15584)\"\r\n***\r\n\r\n**Examples**\r\n\r\nterraform:\r\n```hcl\r\nrole_based_access_control_enabled: true\r\n```\r\nThis code should pass `CKV_AZURE_5`.\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.0.970\r\n\r\n**Additional context**\r\nPR that adds the required support: https://github.com/bridgecrewio/checkov/pull/2648\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AKSRbacEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure RBAC is enabled on AKS clusters\"\n id = \"CKV_AZURE_5\"\n supported_resources = ['azurerm_kubernetes_cluster']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'role_based_access_control/[0]/enabled'\n\n\ncheck = AKSRbacEnabled()\n", "path": "checkov/terraform/checks/resource/azure/AKSRbacEnabled.py"}], "after_files": [{"content": "import dpath.util\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom 
checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass AKSRbacEnabled(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure RBAC is enabled on AKS clusters\"\n id = \"CKV_AZURE_5\"\n supported_resources = [\"azurerm_kubernetes_cluster\"]\n categories = [CheckCategories.KUBERNETES]\n super().__init__(\n name=name,\n id=id,\n categories=categories,\n supported_resources=supported_resources,\n )\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = [\n \"role_based_access_control/[0]/enabled\", # azurerm < 2.99.0\n \"role_based_access_control_enabled\", # azurerm >= 2.99.0\n ]\n\n for key in self.evaluated_keys:\n if dpath.search(conf, key) and dpath.get(conf, key)[0]:\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = AKSRbacEnabled()\n", "path": "checkov/terraform/checks/resource/azure/AKSRbacEnabled.py"}]} | 694 | 447 |
gh_patches_debug_30858 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1720 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV2_AWS_27 not working as expected
**Describe the bug**
CKV2_AWS_27 throws a failure when configuration seems to be defined properly:
```
resource "aws_rds_cluster" "aurora_cluster" {
# checkov:skip=CKV_AWS_139:Required only in prod, set with conditional on profile
# checkov:skip=CKV2_AWS_8:Snapshots, deletion_protection and prevent_destroy are enough
cluster_identifier = "bc-aurora-cluster"
engine = "aurora-postgresql"
engine_version = "11.9"
database_name = xxxx
master_username = xxxx
master_password = xxxx
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
deletion_protection = var.profile == "prod" ? true : false
db_subnet_group_name = aws_db_subnet_group.aurora_cluster_subnet_group.name
vpc_security_group_ids = [aws_security_group.aurora_cluster_sg.id]
iam_database_authentication_enabled = "true"
db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.aurora_cluster_parameter_group.name
kms_key_id = aws_kms_key.aurora_cluster-kms-key.arn
port = "5432"
preferred_maintenance_window = "Sun:03:00-Sun:07:00"
storage_encrypted = "true"
depends_on = [aws_db_subnet_group.aurora_cluster_subnet_group]
lifecycle {
prevent_destroy = true
}
}
resource "aws_rds_cluster_parameter_group" "aurora_cluster_parameter_group" {
description = "Aurora PG Database Instance Parameter Group"
family = "aurora-postgresql11"
name = "bc-aurora-cluster-sg-dbparamgroup"
parameter {
apply_method = "immediate"
name = "log_connections"
value = "1"
}
parameter {
apply_method = "immediate"
name = "log_disconnections"
value = "1"
}
parameter {
apply_method = "pending-reboot"
name = "track_activity_query_size"
value = "16384"
}
parameter {
apply_method = "immediate"
name = "search_path"
value = "\"$user\",public"
}
parameter {
apply_method = "pending-reboot"
name = "shared_preload_libraries"
value = "auto_explain,pg_stat_statements,pg_hint_plan,pgaudit"
}
parameter {
apply_method = "immediate"
name = "idle_in_transaction_session_timeout"
value = "7200000"
}
parameter {
apply_method = "immediate"
name = "log_min_duration_statement"
value = "5000"
}
parameter {
apply_method = "immediate"
name = "log_rotation_age"
value = "1440"
}
parameter {
apply_method = "immediate"
name = "rds.log_retention_period"
value = "10080"
}
parameter {
apply_method = "immediate"
name = "auto_explain.log_verbose"
value = "1"
}
parameter {
apply_method = "immediate"
name = "log_statement"
value = "ddl"
}
parameter {
apply_method = "immediate"
name = "statement_timeout"
value = "7200000"
}
parameter {
apply_method = "immediate"
name = "log_lock_waits"
value = "1"
}
parameter {
apply_method = "immediate"
name = "random_page_cost"
value = "1"
}
parameter {
apply_method = "immediate"
name = "auto_explain.log_min_duration"
value = "5000"
}
parameter {
apply_method = "immediate"
name = "log_rotation_size"
value = "102400"
}
}
```
**Expected behavior**
Should not fail when `db_cluster_parameter_group_name` is defined with parameters "log_statement" "log_min_duration_statement"
**Desktop (please complete the following information):**
- OS: macOS Big Sur 11.5.2
- Checkov Version 2.0.468
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py`
Content:
```
1 import concurrent.futures
2 import re
3 from typing import List, Tuple, Dict, Any, Optional, Pattern
4
5 from networkx import DiGraph
6
7 from checkov.common.graph.checks_infra.enums import SolverType
8 from checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver
9
10 from concurrent.futures import ThreadPoolExecutor
11
12 WILDCARD_PATTERN = re.compile(r"(\S+[.][*][.]*)+")
13
14
15 class BaseAttributeSolver(BaseSolver):
16 operator = ""
17
18 def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:
19 super().__init__(SolverType.ATTRIBUTE)
20 self.resource_types = resource_types
21 self.attribute = attribute
22 self.value = value
23
24 def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
25 executer = ThreadPoolExecutor()
26 jobs = []
27 passed_vertices = []
28 failed_vertices = []
29 for _, data in graph_connector.nodes(data=True):
30 jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))
31
32 concurrent.futures.wait(jobs)
33 return passed_vertices, failed_vertices
34
35 def get_operation(self, vertex: Dict[str, Any]) -> bool:
36 if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):
37 attribute_patterns = self.get_attribute_patterns(self.attribute)
38 attribute_matches = [
39 attr
40 for attr in vertex
41 if any(re.match(attribute_pattern, attr) for attribute_pattern in attribute_patterns)
42 ]
43 if attribute_matches:
44 return self.resource_type_pred(vertex, self.resource_types) and any(
45 self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches
46 )
47 return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(
48 vertex=vertex, attribute=self.attribute
49 )
50
51 def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:
52 raise NotImplementedError
53
54 def _process_node(self, data, passed_vartices, failed_vertices):
55 if not self.resource_type_pred(data, self.resource_types):
56 return
57 if self.get_operation(vertex=data):
58 passed_vartices.append(data)
59 else:
60 failed_vertices.append(data)
61
62 @staticmethod
63 def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:
64 index_pattern = r"[\d+]"
65 split_by_dots = attribute.split(".")
66
67 pattern_parts = []
68 pattern_parts_without_index = []
69 for attr_part in split_by_dots:
70 if attr_part == "*":
71 pattern_parts.append(index_pattern)
72 else:
73 attr_part_pattern = f"({attr_part})"
74 pattern_parts.append(attr_part_pattern)
75 pattern_parts_without_index.append(attr_part_pattern)
76
77 pattern = "[.]".join(pattern_parts)
78 pattern_with_index = re.compile(pattern)
79
80 pattern = "[.]".join(pattern_parts_without_index)
81 pattern_without_index = re.compile(pattern)
82
83 return pattern_with_index, pattern_without_index
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
--- a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
+++ b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
@@ -24,8 +24,8 @@
def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
executer = ThreadPoolExecutor()
jobs = []
- passed_vertices = []
- failed_vertices = []
+ passed_vertices: List[Dict[str, Any]] = []
+ failed_vertices: List[Dict[str, Any]] = []
for _, data in graph_connector.nodes(data=True):
jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))
@@ -51,7 +51,9 @@
def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:
raise NotImplementedError
- def _process_node(self, data, passed_vartices, failed_vertices):
+ def _process_node(
+ self, data: Dict[str, Any], passed_vartices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]
+ ) -> None:
if not self.resource_type_pred(data, self.resource_types):
return
if self.get_operation(vertex=data):
@@ -61,7 +63,7 @@
@staticmethod
def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:
- index_pattern = r"[\d+]"
+ index_pattern = r"[\d]+"
split_by_dots = attribute.split(".")
pattern_parts = []
| {"golden_diff": "diff --git a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n--- a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n+++ b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n@@ -24,8 +24,8 @@\n def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:\n executer = ThreadPoolExecutor()\n jobs = []\n- passed_vertices = []\n- failed_vertices = []\n+ passed_vertices: List[Dict[str, Any]] = []\n+ failed_vertices: List[Dict[str, Any]] = []\n for _, data in graph_connector.nodes(data=True):\n jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))\n \n@@ -51,7 +51,9 @@\n def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:\n raise NotImplementedError\n \n- def _process_node(self, data, passed_vartices, failed_vertices):\n+ def _process_node(\n+ self, data: Dict[str, Any], passed_vartices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]\n+ ) -> None:\n if not self.resource_type_pred(data, self.resource_types):\n return\n if self.get_operation(vertex=data):\n@@ -61,7 +63,7 @@\n \n @staticmethod\n def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:\n- index_pattern = r\"[\\d+]\"\n+ index_pattern = r\"[\\d]+\"\n split_by_dots = attribute.split(\".\")\n \n pattern_parts = []\n", "issue": "CKV2_AWS_27 not working as expected\n**Describe the bug**\r\nCKV2_AWS_27 throws a failure when configuration seems to be defined properly:\r\n\r\n```\r\nresource \"aws_rds_cluster\" \"aurora_cluster\" {\r\n # checkov:skip=CKV_AWS_139:Required only in prod, set with conditional on profile\r\n # checkov:skip=CKV2_AWS_8:Snapshots, deletion_protection and prevent_destroy are enough\r\n cluster_identifier = \"bc-aurora-cluster\"\r\n engine = \"aurora-postgresql\"\r\n engine_version = \"11.9\"\r\n database_name = xxxx\r\n master_username = xxxx\r\n master_password = xxxx\r\n backup_retention_period = 5\r\n preferred_backup_window = \"07:00-09:00\"\r\n deletion_protection = var.profile == \"prod\" ? 
true : false\r\n db_subnet_group_name = aws_db_subnet_group.aurora_cluster_subnet_group.name\r\n vpc_security_group_ids = [aws_security_group.aurora_cluster_sg.id]\r\n iam_database_authentication_enabled = \"true\"\r\n db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.aurora_cluster_parameter_group.name\r\n kms_key_id = aws_kms_key.aurora_cluster-kms-key.arn\r\n port = \"5432\"\r\n preferred_maintenance_window = \"Sun:03:00-Sun:07:00\"\r\n storage_encrypted = \"true\"\r\n depends_on = [aws_db_subnet_group.aurora_cluster_subnet_group]\r\n\r\n lifecycle {\r\n prevent_destroy = true\r\n }\r\n}\r\n\r\nresource \"aws_rds_cluster_parameter_group\" \"aurora_cluster_parameter_group\" {\r\n description = \"Aurora PG Database Instance Parameter Group\"\r\n family = \"aurora-postgresql11\"\r\n name = \"bc-aurora-cluster-sg-dbparamgroup\"\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"log_connections\"\r\n value = \"1\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"log_disconnections\"\r\n value = \"1\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"pending-reboot\"\r\n name = \"track_activity_query_size\"\r\n value = \"16384\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"search_path\"\r\n value = \"\\\"$user\\\",public\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"pending-reboot\"\r\n name = \"shared_preload_libraries\"\r\n value = \"auto_explain,pg_stat_statements,pg_hint_plan,pgaudit\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"idle_in_transaction_session_timeout\"\r\n value = \"7200000\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"log_min_duration_statement\"\r\n value = \"5000\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"log_rotation_age\"\r\n value = \"1440\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"rds.log_retention_period\"\r\n value = \"10080\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"auto_explain.log_verbose\"\r\n value = \"1\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"log_statement\"\r\n value = \"ddl\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"statement_timeout\"\r\n value = \"7200000\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"log_lock_waits\"\r\n value = \"1\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"random_page_cost\"\r\n value = \"1\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"auto_explain.log_min_duration\"\r\n value = \"5000\"\r\n }\r\n\r\n parameter {\r\n apply_method = \"immediate\"\r\n name = \"log_rotation_size\"\r\n value = \"102400\"\r\n }\r\n}\r\n```\r\n\r\n\r\n**Expected behavior**\r\nShould not fail when `db_cluster_parameter_group_name` is defined with parameters \"log_statement\" \"log_min_duration_statement\"\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS Big Sur 11.5.2\r\n - Checkov Version 2.0.468\r\n\r\n\n", "before_files": [{"content": "import concurrent.futures\nimport re\nfrom typing import List, Tuple, Dict, Any, Optional, Pattern\n\nfrom networkx import DiGraph\n\nfrom checkov.common.graph.checks_infra.enums import SolverType\nfrom checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nWILDCARD_PATTERN = re.compile(r\"(\\S+[.][*][.]*)+\")\n\n\nclass 
BaseAttributeSolver(BaseSolver):\n operator = \"\"\n\n def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:\n super().__init__(SolverType.ATTRIBUTE)\n self.resource_types = resource_types\n self.attribute = attribute\n self.value = value\n\n def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:\n executer = ThreadPoolExecutor()\n jobs = []\n passed_vertices = []\n failed_vertices = []\n for _, data in graph_connector.nodes(data=True):\n jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))\n\n concurrent.futures.wait(jobs)\n return passed_vertices, failed_vertices\n\n def get_operation(self, vertex: Dict[str, Any]) -> bool:\n if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):\n attribute_patterns = self.get_attribute_patterns(self.attribute)\n attribute_matches = [\n attr\n for attr in vertex\n if any(re.match(attribute_pattern, attr) for attribute_pattern in attribute_patterns)\n ]\n if attribute_matches:\n return self.resource_type_pred(vertex, self.resource_types) and any(\n self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches\n )\n return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(\n vertex=vertex, attribute=self.attribute\n )\n\n def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:\n raise NotImplementedError\n\n def _process_node(self, data, passed_vartices, failed_vertices):\n if not self.resource_type_pred(data, self.resource_types):\n return\n if self.get_operation(vertex=data):\n passed_vartices.append(data)\n else:\n failed_vertices.append(data)\n\n @staticmethod\n def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:\n index_pattern = r\"[\\d+]\"\n split_by_dots = attribute.split(\".\")\n\n pattern_parts = []\n pattern_parts_without_index = []\n for attr_part in split_by_dots:\n if attr_part == \"*\":\n pattern_parts.append(index_pattern)\n else:\n attr_part_pattern = f\"({attr_part})\"\n pattern_parts.append(attr_part_pattern)\n pattern_parts_without_index.append(attr_part_pattern)\n\n pattern = \"[.]\".join(pattern_parts)\n pattern_with_index = re.compile(pattern)\n\n pattern = \"[.]\".join(pattern_parts_without_index)\n pattern_without_index = re.compile(pattern)\n\n return pattern_with_index, pattern_without_index\n", "path": "checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py"}], "after_files": [{"content": "import concurrent.futures\nimport re\nfrom typing import List, Tuple, Dict, Any, Optional, Pattern\n\nfrom networkx import DiGraph\n\nfrom checkov.common.graph.checks_infra.enums import SolverType\nfrom checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nWILDCARD_PATTERN = re.compile(r\"(\\S+[.][*][.]*)+\")\n\n\nclass BaseAttributeSolver(BaseSolver):\n operator = \"\"\n\n def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:\n super().__init__(SolverType.ATTRIBUTE)\n self.resource_types = resource_types\n self.attribute = attribute\n self.value = value\n\n def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:\n executer = ThreadPoolExecutor()\n jobs = []\n passed_vertices: List[Dict[str, Any]] = []\n failed_vertices: List[Dict[str, Any]] = []\n for _, data in graph_connector.nodes(data=True):\n jobs.append(executer.submit(self._process_node, 
data, passed_vertices, failed_vertices))\n\n concurrent.futures.wait(jobs)\n return passed_vertices, failed_vertices\n\n def get_operation(self, vertex: Dict[str, Any]) -> bool:\n if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):\n attribute_patterns = self.get_attribute_patterns(self.attribute)\n attribute_matches = [\n attr\n for attr in vertex\n if any(re.match(attribute_pattern, attr) for attribute_pattern in attribute_patterns)\n ]\n if attribute_matches:\n return self.resource_type_pred(vertex, self.resource_types) and any(\n self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches\n )\n return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(\n vertex=vertex, attribute=self.attribute\n )\n\n def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:\n raise NotImplementedError\n\n def _process_node(\n self, data: Dict[str, Any], passed_vartices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]\n ) -> None:\n if not self.resource_type_pred(data, self.resource_types):\n return\n if self.get_operation(vertex=data):\n passed_vartices.append(data)\n else:\n failed_vertices.append(data)\n\n @staticmethod\n def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:\n index_pattern = r\"[\\d]+\"\n split_by_dots = attribute.split(\".\")\n\n pattern_parts = []\n pattern_parts_without_index = []\n for attr_part in split_by_dots:\n if attr_part == \"*\":\n pattern_parts.append(index_pattern)\n else:\n attr_part_pattern = f\"({attr_part})\"\n pattern_parts.append(attr_part_pattern)\n pattern_parts_without_index.append(attr_part_pattern)\n\n pattern = \"[.]\".join(pattern_parts)\n pattern_with_index = re.compile(pattern)\n\n pattern = \"[.]\".join(pattern_parts_without_index)\n pattern_without_index = re.compile(pattern)\n\n return pattern_with_index, pattern_without_index\n", "path": "checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py"}]} | 2,112 | 401 |
gh_patches_debug_29340 | rasdani/github-patches | git_diff | conda__conda-6275 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Trivial pip subsection breaks conda env create
I observe that an empty-but-present pip subsection under dependencies breaks `conda env create`. I expect this command to run as if the `- pip:` subsection was not present.
```
$ cat environment.yml
dependencies:
- pip:
$ conda env create -n recreate --file=environment.yml
Fetching package metadata ...........
Solving package specifications: An unexpected error has occurred.
Please consider posting the following information to the
conda GitHub issue tracker at:
https://github.com/conda/conda/issues
Current conda install:
platform : linux-64
conda version : 4.3.13
conda is private : False
conda-env version : 4.3.13
conda-build version : 2.1.5
python version : 3.5.2.final.0
requests version : 2.13.0
root environment : /opt/ts/services/miniconda.ext_public_miniconda/dist (read only)
default environment : /opt/ts/services/miniconda.ext_public_miniconda/dist
envs directories : /nas/dft/ire/rhys/envs
/opt/ts/services/miniconda.ext_public_miniconda/dist/envs
/home/rhys/.conda/envs
package cache : /opt/ts/services/miniconda.ext_public_miniconda/dist/pkgs
/home/rhys/.conda/pkgs
channel URLs : http://python3.app.twosigma.com/conda/twosigma.com/ts/linux-64
http://python3.app.twosigma.com/conda/twosigma.com/ts/noarch
http://python3.app.twosigma.com/conda/twosigma.com/ext/linux-64
http://python3.app.twosigma.com/conda/twosigma.com/ext/noarch
http://python3.app.twosigma.com/conda/repo.continuum.io/pkgs/free/linux-64
http://python3.app.twosigma.com/conda/repo.continuum.io/pkgs/free/noarch
http://python3.app.twosigma.com/conda/repo.continuum.io/pkgs/pro/linux-64
http://python3.app.twosigma.com/conda/repo.continuum.io/pkgs/pro/noarch
config file : /home/rhys/.condarc
offline mode : False
user-agent : conda/4.3.13 requests/2.13.0 CPython/3.5.2 Linux/4.1.35-pv-ts2 debian/7.10 glibc/2.13
UID:GID : 11082:5000
`$ /opt/ts/services/miniconda.ext_public_miniconda/dist/bin/conda-env create -n recreate --file=environment.yml`
Traceback (most recent call last):
File "/opt/ts/services/miniconda.ext_public_miniconda/dist/lib/python3.5/site-packages/conda/exceptions.py", line 591, in conda_exception_handler
return_value = func(*args, **kwargs)
File "/opt/ts/services/miniconda.ext_public_miniconda/dist/lib/python3.5/site-packages/conda_env/cli/main_create.py", line 108, in execute
installer.install(prefix, pkg_specs, args, env)
File "/opt/ts/services/miniconda.ext_public_miniconda/dist/lib/python3.5/site-packages/conda_env/installers/pip.py", line 8, in install
pip_cmd = pip_args(prefix) + ['install', ] + specs
TypeError: unsupported operand type(s) for +: 'NoneType' and 'list'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_env/cli/main_create.py`
Content:
```
1 from __future__ import print_function
2
3 from argparse import RawDescriptionHelpFormatter
4 import os
5 import sys
6 import textwrap
7
8 from conda._vendor.auxlib.path import expand
9 from conda.cli import install as cli_install
10 from conda.cli.conda_argparse import add_parser_json, add_parser_prefix
11 from conda.gateways.disk.delete import rm_rf
12 from conda.misc import touch_nonadmin
13 from .common import get_prefix
14 from .. import exceptions, specs
15 from ..installers.base import InvalidInstaller, get_installer
16
17 description = """
18 Create an environment based on an environment file
19 """
20
21 example = """
22 examples:
23 conda env create
24 conda env create -n name
25 conda env create vader/deathstar
26 conda env create -f=/path/to/environment.yml
27 conda env create -f=/path/to/requirements.txt -n deathstar
28 conda env create -f=/path/to/requirements.txt -p /home/user/software/deathstar
29 """
30
31
32 def configure_parser(sub_parsers):
33 p = sub_parsers.add_parser(
34 'create',
35 formatter_class=RawDescriptionHelpFormatter,
36 description=description,
37 help=description,
38 epilog=example,
39 )
40 p.add_argument(
41 '-f', '--file',
42 action='store',
43 help='environment definition file (default: environment.yml)',
44 default='environment.yml',
45 )
46
47 # Add name and prefix args
48 add_parser_prefix(p)
49
50 p.add_argument(
51 '-q', '--quiet',
52 action='store_true',
53 default=False,
54 )
55 p.add_argument(
56 'remote_definition',
57 help='remote environment definition / IPython notebook',
58 action='store',
59 default=None,
60 nargs='?'
61 )
62 p.add_argument(
63 '--force',
64 help=('force creation of environment (removing a previously existing '
65 'environment of the same name).'),
66 action='store_true',
67 default=False,
68 )
69 add_parser_json(p)
70 p.set_defaults(func=execute)
71
72
73 def execute(args, parser):
74 from conda.base.context import context
75 name = args.remote_definition or args.name
76
77 try:
78 spec = specs.detect(name=name, filename=expand(args.file),
79 directory=os.getcwd())
80 env = spec.environment
81
82 # FIXME conda code currently requires args to have a name or prefix
83 # don't overwrite name if it's given. gh-254
84 if args.prefix is None and args.name is None:
85 args.name = env.name
86
87 except exceptions.SpecNotFound:
88 raise
89
90 prefix = get_prefix(args, search=False)
91
92 if args.force and prefix != context.root_prefix and os.path.exists(prefix):
93 rm_rf(prefix)
94 cli_install.check_prefix(prefix, json=args.json)
95
96 # TODO, add capability
97 # common.ensure_override_channels_requires_channel(args)
98 # channel_urls = args.channel or ()
99
100 # # special case for empty environment
101 # if not env.dependencies:
102 # from conda.install import symlink_conda
103 # symlink_conda(prefix, context.root_dir)
104
105 for installer_type, pkg_specs in env.dependencies.items():
106 try:
107 installer = get_installer(installer_type)
108 installer.install(prefix, pkg_specs, args, env)
109 except InvalidInstaller:
110 sys.stderr.write(textwrap.dedent("""
111 Unable to install package for {0}.
112
113 Please double check and ensure you dependencies file has
114 the correct spelling. You might also try installing the
115 conda-env-{0} package to see if provides the required
116 installer.
117 """).lstrip().format(installer_type)
118 )
119 return -1
120
121 touch_nonadmin(prefix)
122 cli_install.print_activate(args.name if args.name else prefix)
123
```
Path: `conda_env/env.py`
Content:
```
1 from __future__ import absolute_import, print_function
2
3 from collections import OrderedDict
4 from copy import copy
5 from itertools import chain
6 import os
7
8 from conda.base.context import context
9 from conda.cli import common # TODO: this should never have to import form conda.cli
10 from conda.common.serialize import yaml_load
11 from conda.core.linked_data import linked
12 from conda_env.yaml import dump
13 from . import compat, exceptions, yaml
14 from .pip_util import add_pip_installed
15
16
17 def load_from_directory(directory):
18 """Load and return an ``Environment`` from a given ``directory``"""
19 files = ['environment.yml', 'environment.yaml']
20 while True:
21 for f in files:
22 try:
23 return from_file(os.path.join(directory, f))
24 except exceptions.EnvironmentFileNotFound:
25 pass
26 old_directory = directory
27 directory = os.path.dirname(directory)
28 if directory == old_directory:
29 break
30 raise exceptions.EnvironmentFileNotFound(files[0])
31
32
33 # TODO This should lean more on conda instead of divining it from the outside
34 # TODO tests!!!
35 def from_environment(name, prefix, no_builds=False, ignore_channels=False):
36 """
37 Get environment object from prefix
38 Args:
39 name: The name of environment
40 prefix: The path of prefix
41 no_builds: Whether has build requirement
42 ignore_channels: whether ignore_channels
43
44 Returns: Environment object
45 """
46 installed = linked(prefix, ignore_channels=ignore_channels)
47 conda_pkgs = copy(installed)
48 # json=True hides the output, data is added to installed
49 add_pip_installed(prefix, installed, json=True)
50
51 pip_pkgs = sorted(installed - conda_pkgs)
52
53 if no_builds:
54 dependencies = ['='.join((a.name, a.version)) for a in sorted(conda_pkgs)]
55 else:
56 dependencies = ['='.join((a.name, a.version, a.build)) for a in sorted(conda_pkgs)]
57 if len(pip_pkgs) > 0:
58 dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})
59 # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq
60 # this doesn't dump correctly using pyyaml
61 channels = list(context.channels)
62 if not ignore_channels:
63 for dist in conda_pkgs:
64 if dist.channel not in channels:
65 channels.insert(0, dist.channel)
66 return Environment(name=name, dependencies=dependencies, channels=channels, prefix=prefix)
67
68
69 def from_yaml(yamlstr, **kwargs):
70 """Load and return a ``Environment`` from a given ``yaml string``"""
71 data = yaml_load(yamlstr)
72 if kwargs is not None:
73 for key, value in kwargs.items():
74 data[key] = value
75 return Environment(**data)
76
77
78 def from_file(filename):
79 if not os.path.exists(filename):
80 raise exceptions.EnvironmentFileNotFound(filename)
81 with open(filename, 'r') as fp:
82 yamlstr = fp.read()
83 return from_yaml(yamlstr, filename=filename)
84
85
86 # TODO test explicitly
87 class Dependencies(OrderedDict):
88 def __init__(self, raw, *args, **kwargs):
89 super(Dependencies, self).__init__(*args, **kwargs)
90 self.raw = raw
91 self.parse()
92
93 def parse(self):
94 if not self.raw:
95 return
96
97 self.update({'conda': []})
98
99 for line in self.raw:
100 if isinstance(line, dict):
101 self.update(line)
102 else:
103 self['conda'].append(common.arg2spec(line))
104
105 # TODO only append when it's not already present
106 def add(self, package_name):
107 self.raw.append(package_name)
108 self.parse()
109
110
111 def unique(seq, key=None):
112 """ Return only unique elements of a sequence
113 >>> tuple(unique((1, 2, 3)))
114 (1, 2, 3)
115 >>> tuple(unique((1, 2, 1, 3)))
116 (1, 2, 3)
117 Uniqueness can be defined by key keyword
118 >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))
119 ('cat', 'mouse')
120 """
121 seen = set()
122 seen_add = seen.add
123 if key is None:
124 for item in seq:
125 if item not in seen:
126 seen_add(item)
127 yield item
128 else: # calculate key
129 for item in seq:
130 val = key(item)
131 if val not in seen:
132 seen_add(val)
133 yield item
134
135
136 class Environment(object):
137 def __init__(self, name=None, filename=None, channels=None,
138 dependencies=None, prefix=None):
139 self.name = name
140 self.filename = filename
141 self.prefix = prefix
142 self.dependencies = Dependencies(dependencies)
143
144 if channels is None:
145 channels = []
146 self.channels = channels
147
148 def add_channels(self, channels):
149 self.channels = list(unique(chain.from_iterable((channels, self.channels))))
150
151 def remove_channels(self):
152 self.channels = []
153
154 def to_dict(self):
155 d = yaml.dict([('name', self.name)])
156 if self.channels:
157 d['channels'] = self.channels
158 if self.dependencies:
159 d['dependencies'] = self.dependencies.raw
160 if self.prefix:
161 d['prefix'] = self.prefix
162 return d
163
164 def to_yaml(self, stream=None):
165 d = self.to_dict()
166 out = compat.u(dump(d))
167 if stream is None:
168 return out
169 stream.write(compat.b(out, encoding="utf-8"))
170
171 def save(self):
172 with open(self.filename, "wb") as fp:
173 self.to_yaml(stream=fp)
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_env/cli/main_create.py b/conda_env/cli/main_create.py
--- a/conda_env/cli/main_create.py
+++ b/conda_env/cli/main_create.py
@@ -110,7 +110,7 @@
sys.stderr.write(textwrap.dedent("""
Unable to install package for {0}.
- Please double check and ensure you dependencies file has
+ Please double check and ensure your dependencies file has
the correct spelling. You might also try installing the
conda-env-{0} package to see if provides the required
installer.
diff --git a/conda_env/env.py b/conda_env/env.py
--- a/conda_env/env.py
+++ b/conda_env/env.py
@@ -9,6 +9,7 @@
from conda.cli import common # TODO: this should never have to import form conda.cli
from conda.common.serialize import yaml_load
from conda.core.linked_data import linked
+from conda.models.match_spec import MatchSpec
from conda_env.yaml import dump
from . import compat, exceptions, yaml
from .pip_util import add_pip_installed
@@ -102,6 +103,12 @@
else:
self['conda'].append(common.arg2spec(line))
+ if 'pip' in self:
+ if not self['pip']:
+ del self['pip']
+ if not any(MatchSpec(s).name == 'pip' for s in self['conda']):
+ self['conda'].append('pip')
+
# TODO only append when it's not already present
def add(self, package_name):
self.raw.append(package_name)
| {"golden_diff": "diff --git a/conda_env/cli/main_create.py b/conda_env/cli/main_create.py\n--- a/conda_env/cli/main_create.py\n+++ b/conda_env/cli/main_create.py\n@@ -110,7 +110,7 @@\n sys.stderr.write(textwrap.dedent(\"\"\"\n Unable to install package for {0}.\n \n- Please double check and ensure you dependencies file has\n+ Please double check and ensure your dependencies file has\n the correct spelling. You might also try installing the\n conda-env-{0} package to see if provides the required\n installer.\ndiff --git a/conda_env/env.py b/conda_env/env.py\n--- a/conda_env/env.py\n+++ b/conda_env/env.py\n@@ -9,6 +9,7 @@\n from conda.cli import common # TODO: this should never have to import form conda.cli\n from conda.common.serialize import yaml_load\n from conda.core.linked_data import linked\n+from conda.models.match_spec import MatchSpec\n from conda_env.yaml import dump\n from . import compat, exceptions, yaml\n from .pip_util import add_pip_installed\n@@ -102,6 +103,12 @@\n else:\n self['conda'].append(common.arg2spec(line))\n \n+ if 'pip' in self:\n+ if not self['pip']:\n+ del self['pip']\n+ if not any(MatchSpec(s).name == 'pip' for s in self['conda']):\n+ self['conda'].append('pip')\n+\n # TODO only append when it's not already present\n def add(self, package_name):\n self.raw.append(package_name)\n", "issue": "Trivial pip subsection breaks conda env create\nI observe that an empty-but-present pip subsection under dependencies breaks `conda env create`. I expect this command to run as if the `- pip:` subsection was not present.\r\n\r\n```\r\n$ cat environment.yml \r\ndependencies:\r\n- pip:\r\n\r\n$ conda env create -n recreate --file=environment.yml\r\nFetching package metadata ...........\r\nSolving package specifications: An unexpected error has occurred.\r\nPlease consider posting the following information to the\r\nconda GitHub issue tracker at:\r\n\r\n https://github.com/conda/conda/issues\r\n\r\nCurrent conda install:\r\n\r\n platform : linux-64\r\n conda version : 4.3.13\r\n conda is private : False\r\n conda-env version : 4.3.13\r\n conda-build version : 2.1.5\r\n python version : 3.5.2.final.0\r\n requests version : 2.13.0\r\n root environment : /opt/ts/services/miniconda.ext_public_miniconda/dist (read only)\r\n default environment : /opt/ts/services/miniconda.ext_public_miniconda/dist\r\n envs directories : /nas/dft/ire/rhys/envs\r\n /opt/ts/services/miniconda.ext_public_miniconda/dist/envs\r\n /home/rhys/.conda/envs\r\n package cache : /opt/ts/services/miniconda.ext_public_miniconda/dist/pkgs\r\n /home/rhys/.conda/pkgs\r\n channel URLs : http://python3.app.twosigma.com/conda/twosigma.com/ts/linux-64\r\n http://python3.app.twosigma.com/conda/twosigma.com/ts/noarch\r\n http://python3.app.twosigma.com/conda/twosigma.com/ext/linux-64\r\n http://python3.app.twosigma.com/conda/twosigma.com/ext/noarch\r\n http://python3.app.twosigma.com/conda/repo.continuum.io/pkgs/free/linux-64\r\n http://python3.app.twosigma.com/conda/repo.continuum.io/pkgs/free/noarch\r\n http://python3.app.twosigma.com/conda/repo.continuum.io/pkgs/pro/linux-64\r\n http://python3.app.twosigma.com/conda/repo.continuum.io/pkgs/pro/noarch\r\n config file : /home/rhys/.condarc\r\n offline mode : False\r\n user-agent : conda/4.3.13 requests/2.13.0 CPython/3.5.2 Linux/4.1.35-pv-ts2 debian/7.10 glibc/2.13\r\n UID:GID : 11082:5000\r\n\r\n`$ /opt/ts/services/miniconda.ext_public_miniconda/dist/bin/conda-env create -n recreate --file=environment.yml`\r\n\r\n Traceback (most recent call last):\r\n File 
\"/opt/ts/services/miniconda.ext_public_miniconda/dist/lib/python3.5/site-packages/conda/exceptions.py\", line 591, in conda_exception_handler\r\n return_value = func(*args, **kwargs)\r\n File \"/opt/ts/services/miniconda.ext_public_miniconda/dist/lib/python3.5/site-packages/conda_env/cli/main_create.py\", line 108, in execute\r\n installer.install(prefix, pkg_specs, args, env)\r\n File \"/opt/ts/services/miniconda.ext_public_miniconda/dist/lib/python3.5/site-packages/conda_env/installers/pip.py\", line 8, in install\r\n pip_cmd = pip_args(prefix) + ['install', ] + specs\r\n TypeError: unsupported operand type(s) for +: 'NoneType' and 'list'\r\n```\n", "before_files": [{"content": "from __future__ import print_function\n\nfrom argparse import RawDescriptionHelpFormatter\nimport os\nimport sys\nimport textwrap\n\nfrom conda._vendor.auxlib.path import expand\nfrom conda.cli import install as cli_install\nfrom conda.cli.conda_argparse import add_parser_json, add_parser_prefix\nfrom conda.gateways.disk.delete import rm_rf\nfrom conda.misc import touch_nonadmin\nfrom .common import get_prefix\nfrom .. import exceptions, specs\nfrom ..installers.base import InvalidInstaller, get_installer\n\ndescription = \"\"\"\nCreate an environment based on an environment file\n\"\"\"\n\nexample = \"\"\"\nexamples:\n conda env create\n conda env create -n name\n conda env create vader/deathstar\n conda env create -f=/path/to/environment.yml\n conda env create -f=/path/to/requirements.txt -n deathstar\n conda env create -f=/path/to/requirements.txt -p /home/user/software/deathstar\n\"\"\"\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'create',\n formatter_class=RawDescriptionHelpFormatter,\n description=description,\n help=description,\n epilog=example,\n )\n p.add_argument(\n '-f', '--file',\n action='store',\n help='environment definition file (default: environment.yml)',\n default='environment.yml',\n )\n\n # Add name and prefix args\n add_parser_prefix(p)\n\n p.add_argument(\n '-q', '--quiet',\n action='store_true',\n default=False,\n )\n p.add_argument(\n 'remote_definition',\n help='remote environment definition / IPython notebook',\n action='store',\n default=None,\n nargs='?'\n )\n p.add_argument(\n '--force',\n help=('force creation of environment (removing a previously existing '\n 'environment of the same name).'),\n action='store_true',\n default=False,\n )\n add_parser_json(p)\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n from conda.base.context import context\n name = args.remote_definition or args.name\n\n try:\n spec = specs.detect(name=name, filename=expand(args.file),\n directory=os.getcwd())\n env = spec.environment\n\n # FIXME conda code currently requires args to have a name or prefix\n # don't overwrite name if it's given. 
gh-254\n if args.prefix is None and args.name is None:\n args.name = env.name\n\n except exceptions.SpecNotFound:\n raise\n\n prefix = get_prefix(args, search=False)\n\n if args.force and prefix != context.root_prefix and os.path.exists(prefix):\n rm_rf(prefix)\n cli_install.check_prefix(prefix, json=args.json)\n\n # TODO, add capability\n # common.ensure_override_channels_requires_channel(args)\n # channel_urls = args.channel or ()\n\n # # special case for empty environment\n # if not env.dependencies:\n # from conda.install import symlink_conda\n # symlink_conda(prefix, context.root_dir)\n\n for installer_type, pkg_specs in env.dependencies.items():\n try:\n installer = get_installer(installer_type)\n installer.install(prefix, pkg_specs, args, env)\n except InvalidInstaller:\n sys.stderr.write(textwrap.dedent(\"\"\"\n Unable to install package for {0}.\n\n Please double check and ensure you dependencies file has\n the correct spelling. You might also try installing the\n conda-env-{0} package to see if provides the required\n installer.\n \"\"\").lstrip().format(installer_type)\n )\n return -1\n\n touch_nonadmin(prefix)\n cli_install.print_activate(args.name if args.name else prefix)\n", "path": "conda_env/cli/main_create.py"}, {"content": "from __future__ import absolute_import, print_function\n\nfrom collections import OrderedDict\nfrom copy import copy\nfrom itertools import chain\nimport os\n\nfrom conda.base.context import context\nfrom conda.cli import common # TODO: this should never have to import form conda.cli\nfrom conda.common.serialize import yaml_load\nfrom conda.core.linked_data import linked\nfrom conda_env.yaml import dump\nfrom . import compat, exceptions, yaml\nfrom .pip_util import add_pip_installed\n\n\ndef load_from_directory(directory):\n \"\"\"Load and return an ``Environment`` from a given ``directory``\"\"\"\n files = ['environment.yml', 'environment.yaml']\n while True:\n for f in files:\n try:\n return from_file(os.path.join(directory, f))\n except exceptions.EnvironmentFileNotFound:\n pass\n old_directory = directory\n directory = os.path.dirname(directory)\n if directory == old_directory:\n break\n raise exceptions.EnvironmentFileNotFound(files[0])\n\n\n# TODO This should lean more on conda instead of divining it from the outside\n# TODO tests!!!\ndef from_environment(name, prefix, no_builds=False, ignore_channels=False):\n \"\"\"\n Get environment object from prefix\n Args:\n name: The name of environment\n prefix: The path of prefix\n no_builds: Whether has build requirement\n ignore_channels: whether ignore_channels\n\n Returns: Environment object\n \"\"\"\n installed = linked(prefix, ignore_channels=ignore_channels)\n conda_pkgs = copy(installed)\n # json=True hides the output, data is added to installed\n add_pip_installed(prefix, installed, json=True)\n\n pip_pkgs = sorted(installed - conda_pkgs)\n\n if no_builds:\n dependencies = ['='.join((a.name, a.version)) for a in sorted(conda_pkgs)]\n else:\n dependencies = ['='.join((a.name, a.version, a.build)) for a in sorted(conda_pkgs)]\n if len(pip_pkgs) > 0:\n dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})\n # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq\n # this doesn't dump correctly using pyyaml\n channels = list(context.channels)\n if not ignore_channels:\n for dist in conda_pkgs:\n if dist.channel not in channels:\n channels.insert(0, dist.channel)\n return Environment(name=name, dependencies=dependencies, channels=channels, 
prefix=prefix)\n\n\ndef from_yaml(yamlstr, **kwargs):\n \"\"\"Load and return a ``Environment`` from a given ``yaml string``\"\"\"\n data = yaml_load(yamlstr)\n if kwargs is not None:\n for key, value in kwargs.items():\n data[key] = value\n return Environment(**data)\n\n\ndef from_file(filename):\n if not os.path.exists(filename):\n raise exceptions.EnvironmentFileNotFound(filename)\n with open(filename, 'r') as fp:\n yamlstr = fp.read()\n return from_yaml(yamlstr, filename=filename)\n\n\n# TODO test explicitly\nclass Dependencies(OrderedDict):\n def __init__(self, raw, *args, **kwargs):\n super(Dependencies, self).__init__(*args, **kwargs)\n self.raw = raw\n self.parse()\n\n def parse(self):\n if not self.raw:\n return\n\n self.update({'conda': []})\n\n for line in self.raw:\n if isinstance(line, dict):\n self.update(line)\n else:\n self['conda'].append(common.arg2spec(line))\n\n # TODO only append when it's not already present\n def add(self, package_name):\n self.raw.append(package_name)\n self.parse()\n\n\ndef unique(seq, key=None):\n \"\"\" Return only unique elements of a sequence\n >>> tuple(unique((1, 2, 3)))\n (1, 2, 3)\n >>> tuple(unique((1, 2, 1, 3)))\n (1, 2, 3)\n Uniqueness can be defined by key keyword\n >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))\n ('cat', 'mouse')\n \"\"\"\n seen = set()\n seen_add = seen.add\n if key is None:\n for item in seq:\n if item not in seen:\n seen_add(item)\n yield item\n else: # calculate key\n for item in seq:\n val = key(item)\n if val not in seen:\n seen_add(val)\n yield item\n\n\nclass Environment(object):\n def __init__(self, name=None, filename=None, channels=None,\n dependencies=None, prefix=None):\n self.name = name\n self.filename = filename\n self.prefix = prefix\n self.dependencies = Dependencies(dependencies)\n\n if channels is None:\n channels = []\n self.channels = channels\n\n def add_channels(self, channels):\n self.channels = list(unique(chain.from_iterable((channels, self.channels))))\n\n def remove_channels(self):\n self.channels = []\n\n def to_dict(self):\n d = yaml.dict([('name', self.name)])\n if self.channels:\n d['channels'] = self.channels\n if self.dependencies:\n d['dependencies'] = self.dependencies.raw\n if self.prefix:\n d['prefix'] = self.prefix\n return d\n\n def to_yaml(self, stream=None):\n d = self.to_dict()\n out = compat.u(dump(d))\n if stream is None:\n return out\n stream.write(compat.b(out, encoding=\"utf-8\"))\n\n def save(self):\n with open(self.filename, \"wb\") as fp:\n self.to_yaml(stream=fp)\n", "path": "conda_env/env.py"}], "after_files": [{"content": "from __future__ import print_function\n\nfrom argparse import RawDescriptionHelpFormatter\nimport os\nimport sys\nimport textwrap\n\nfrom conda._vendor.auxlib.path import expand\nfrom conda.cli import install as cli_install\nfrom conda.cli.conda_argparse import add_parser_json, add_parser_prefix\nfrom conda.gateways.disk.delete import rm_rf\nfrom conda.misc import touch_nonadmin\nfrom .common import get_prefix\nfrom .. 
import exceptions, specs\nfrom ..installers.base import InvalidInstaller, get_installer\n\ndescription = \"\"\"\nCreate an environment based on an environment file\n\"\"\"\n\nexample = \"\"\"\nexamples:\n conda env create\n conda env create -n name\n conda env create vader/deathstar\n conda env create -f=/path/to/environment.yml\n conda env create -f=/path/to/requirements.txt -n deathstar\n conda env create -f=/path/to/requirements.txt -p /home/user/software/deathstar\n\"\"\"\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'create',\n formatter_class=RawDescriptionHelpFormatter,\n description=description,\n help=description,\n epilog=example,\n )\n p.add_argument(\n '-f', '--file',\n action='store',\n help='environment definition file (default: environment.yml)',\n default='environment.yml',\n )\n\n # Add name and prefix args\n add_parser_prefix(p)\n\n p.add_argument(\n '-q', '--quiet',\n action='store_true',\n default=False,\n )\n p.add_argument(\n 'remote_definition',\n help='remote environment definition / IPython notebook',\n action='store',\n default=None,\n nargs='?'\n )\n p.add_argument(\n '--force',\n help=('force creation of environment (removing a previously existing '\n 'environment of the same name).'),\n action='store_true',\n default=False,\n )\n add_parser_json(p)\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n from conda.base.context import context\n name = args.remote_definition or args.name\n\n try:\n spec = specs.detect(name=name, filename=expand(args.file),\n directory=os.getcwd())\n env = spec.environment\n\n # FIXME conda code currently requires args to have a name or prefix\n # don't overwrite name if it's given. gh-254\n if args.prefix is None and args.name is None:\n args.name = env.name\n\n except exceptions.SpecNotFound:\n raise\n\n prefix = get_prefix(args, search=False)\n\n if args.force and prefix != context.root_prefix and os.path.exists(prefix):\n rm_rf(prefix)\n cli_install.check_prefix(prefix, json=args.json)\n\n # TODO, add capability\n # common.ensure_override_channels_requires_channel(args)\n # channel_urls = args.channel or ()\n\n # # special case for empty environment\n # if not env.dependencies:\n # from conda.install import symlink_conda\n # symlink_conda(prefix, context.root_dir)\n\n for installer_type, pkg_specs in env.dependencies.items():\n try:\n installer = get_installer(installer_type)\n installer.install(prefix, pkg_specs, args, env)\n except InvalidInstaller:\n sys.stderr.write(textwrap.dedent(\"\"\"\n Unable to install package for {0}.\n\n Please double check and ensure your dependencies file has\n the correct spelling. You might also try installing the\n conda-env-{0} package to see if provides the required\n installer.\n \"\"\").lstrip().format(installer_type)\n )\n return -1\n\n touch_nonadmin(prefix)\n cli_install.print_activate(args.name if args.name else prefix)\n", "path": "conda_env/cli/main_create.py"}, {"content": "from __future__ import absolute_import, print_function\n\nfrom collections import OrderedDict\nfrom copy import copy\nfrom itertools import chain\nimport os\n\nfrom conda.base.context import context\nfrom conda.cli import common # TODO: this should never have to import form conda.cli\nfrom conda.common.serialize import yaml_load\nfrom conda.core.linked_data import linked\nfrom conda.models.match_spec import MatchSpec\nfrom conda_env.yaml import dump\nfrom . 
import compat, exceptions, yaml\nfrom .pip_util import add_pip_installed\n\n\ndef load_from_directory(directory):\n \"\"\"Load and return an ``Environment`` from a given ``directory``\"\"\"\n files = ['environment.yml', 'environment.yaml']\n while True:\n for f in files:\n try:\n return from_file(os.path.join(directory, f))\n except exceptions.EnvironmentFileNotFound:\n pass\n old_directory = directory\n directory = os.path.dirname(directory)\n if directory == old_directory:\n break\n raise exceptions.EnvironmentFileNotFound(files[0])\n\n\n# TODO This should lean more on conda instead of divining it from the outside\n# TODO tests!!!\ndef from_environment(name, prefix, no_builds=False, ignore_channels=False):\n \"\"\"\n Get environment object from prefix\n Args:\n name: The name of environment\n prefix: The path of prefix\n no_builds: Whether has build requirement\n ignore_channels: whether ignore_channels\n\n Returns: Environment object\n \"\"\"\n installed = linked(prefix, ignore_channels=ignore_channels)\n conda_pkgs = copy(installed)\n # json=True hides the output, data is added to installed\n add_pip_installed(prefix, installed, json=True)\n\n pip_pkgs = sorted(installed - conda_pkgs)\n\n if no_builds:\n dependencies = ['='.join((a.name, a.version)) for a in sorted(conda_pkgs)]\n else:\n dependencies = ['='.join((a.name, a.version, a.build)) for a in sorted(conda_pkgs)]\n if len(pip_pkgs) > 0:\n dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})\n # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq\n # this doesn't dump correctly using pyyaml\n channels = list(context.channels)\n if not ignore_channels:\n for dist in conda_pkgs:\n if dist.channel not in channels:\n channels.insert(0, dist.channel)\n return Environment(name=name, dependencies=dependencies, channels=channels, prefix=prefix)\n\n\ndef from_yaml(yamlstr, **kwargs):\n \"\"\"Load and return a ``Environment`` from a given ``yaml string``\"\"\"\n data = yaml_load(yamlstr)\n if kwargs is not None:\n for key, value in kwargs.items():\n data[key] = value\n return Environment(**data)\n\n\ndef from_file(filename):\n if not os.path.exists(filename):\n raise exceptions.EnvironmentFileNotFound(filename)\n with open(filename, 'r') as fp:\n yamlstr = fp.read()\n return from_yaml(yamlstr, filename=filename)\n\n\n# TODO test explicitly\nclass Dependencies(OrderedDict):\n def __init__(self, raw, *args, **kwargs):\n super(Dependencies, self).__init__(*args, **kwargs)\n self.raw = raw\n self.parse()\n\n def parse(self):\n if not self.raw:\n return\n\n self.update({'conda': []})\n\n for line in self.raw:\n if isinstance(line, dict):\n self.update(line)\n else:\n self['conda'].append(common.arg2spec(line))\n\n if 'pip' in self:\n if not self['pip']:\n del self['pip']\n if not any(MatchSpec(s).name == 'pip' for s in self['conda']):\n self['conda'].append('pip')\n\n # TODO only append when it's not already present\n def add(self, package_name):\n self.raw.append(package_name)\n self.parse()\n\n\ndef unique(seq, key=None):\n \"\"\" Return only unique elements of a sequence\n >>> tuple(unique((1, 2, 3)))\n (1, 2, 3)\n >>> tuple(unique((1, 2, 1, 3)))\n (1, 2, 3)\n Uniqueness can be defined by key keyword\n >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))\n ('cat', 'mouse')\n \"\"\"\n seen = set()\n seen_add = seen.add\n if key is None:\n for item in seq:\n if item not in seen:\n seen_add(item)\n yield item\n else: # calculate key\n for item in seq:\n val = key(item)\n if val not in 
seen:\n seen_add(val)\n yield item\n\n\nclass Environment(object):\n def __init__(self, name=None, filename=None, channels=None,\n dependencies=None, prefix=None):\n self.name = name\n self.filename = filename\n self.prefix = prefix\n self.dependencies = Dependencies(dependencies)\n\n if channels is None:\n channels = []\n self.channels = channels\n\n def add_channels(self, channels):\n self.channels = list(unique(chain.from_iterable((channels, self.channels))))\n\n def remove_channels(self):\n self.channels = []\n\n def to_dict(self):\n d = yaml.dict([('name', self.name)])\n if self.channels:\n d['channels'] = self.channels\n if self.dependencies:\n d['dependencies'] = self.dependencies.raw\n if self.prefix:\n d['prefix'] = self.prefix\n return d\n\n def to_yaml(self, stream=None):\n d = self.to_dict()\n out = compat.u(dump(d))\n if stream is None:\n return out\n stream.write(compat.b(out, encoding=\"utf-8\"))\n\n def save(self):\n with open(self.filename, \"wb\") as fp:\n self.to_yaml(stream=fp)\n", "path": "conda_env/env.py"}]} | 3,829 | 364 |
gh_patches_debug_57408 | rasdani/github-patches | git_diff | kornia__kornia-1861 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bugs when using RandomRGBShift
### Describe the bug
When running RandomRGBShift augmentation on gpus I came across this error
```
File "/usr/local/lib/python3.8/dist-packages/kornia/augmentation/_2d/intensity/random_rgb_shift.py", line 100, in apply_transform
return shift_rgb(inp, params['r_shift'], params['g_shift'], params['b_shift'])
File "/usr/local/lib/python3.8/dist-packages/kornia/enhance/shift_rgb.py", line 17, in shift_rgb
shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
ValueError: only one element tensors can be converted to Python scalars
```
after checking the code I think the problem is that
```
shifts = [params['r_shift'], params['g_shift'], params['b_shift']]
shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
```
and I am also not sure about `.view(1,3,1,1)`: shouldn't it be `.view(-1,3,1,1)`?
### Reproduction steps
```bash
1. setting up RandomRGBShift augmentation
2. Using RandomRGBShift augmentation with gpus and batchsize > 1
```
### Expected behavior
```
File "/usr/local/lib/python3.8/dist-packages/kornia/augmentation/_2d/intensity/random_rgb_shift.py", line 100, in apply_transform
return shift_rgb(inp, params['r_shift'], params['g_shift'], params['b_shift'])
File "/usr/local/lib/python3.8/dist-packages/kornia/enhance/shift_rgb.py", line 17, in shift_rgb
shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
ValueError: only one element tensors can be converted to Python scalars
```
### Environment
```shell
- PyTorch Version : 1.10
- Linux
- How you installed PyTorch (`pip`):
- Python version: 3.8
- cuda:11.3.0-cudnn8
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/enhance/shift_rgb.py`
Content:
```
1 import torch
2
3 from kornia.testing import KORNIA_CHECK_IS_COLOR, KORNIA_CHECK_IS_TENSOR
4
5
6 def shift_rgb(image: torch.Tensor, r_shift: torch.Tensor, g_shift: torch.Tensor, b_shift: torch.Tensor) -> torch.Tensor:
7 """Shift rgb channels.
8
9 Shift each image's channel by either r_shift for red, g_shift for green and b_shift for blue channels.
10 """
11
12 KORNIA_CHECK_IS_TENSOR(image)
13 KORNIA_CHECK_IS_COLOR(image, f"with shape {image.shape}")
14
15 shifts = [r_shift, g_shift, b_shift]
16
17 shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
18
19 return shifted
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/enhance/shift_rgb.py b/kornia/enhance/shift_rgb.py
--- a/kornia/enhance/shift_rgb.py
+++ b/kornia/enhance/shift_rgb.py
@@ -14,6 +14,6 @@
shifts = [r_shift, g_shift, b_shift]
- shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
+ shifted = (image + torch.stack(shifts).view(-1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
return shifted
| {"golden_diff": "diff --git a/kornia/enhance/shift_rgb.py b/kornia/enhance/shift_rgb.py\n--- a/kornia/enhance/shift_rgb.py\n+++ b/kornia/enhance/shift_rgb.py\n@@ -14,6 +14,6 @@\n \n shifts = [r_shift, g_shift, b_shift]\n \n- shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n+ shifted = (image + torch.stack(shifts).view(-1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n \n return shifted\n", "issue": "Bugs when using RandomRGBShift\n### Describe the bug\r\n\r\nWhen running RandomRGBShift augmentation on gpus I came across this error \r\n\r\n```\r\n File \"/usr/local/lib/python3.8/dist-packages/kornia/augmentation/_2d/intensity/random_rgb_shift.py\", line 100, in apply_transform\r\n return shift_rgb(inp, params['r_shift'], params['g_shift'], params['b_shift'])\r\n File \"/usr/local/lib/python3.8/dist-packages/kornia/enhance/shift_rgb.py\", line 17, in shift_rgb\r\n shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\r\nValueError: only one element tensors can be converted to Python scalars\r\n```\r\nafter checking the code I think the problem is that \r\n```\r\nshifts = [params['r_shift'], params['g_shift'], params['b_shift']]\r\nshifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\r\n```\r\nand also I am not sure about `.view(1,3,1,1)` shouldn't it be `.view(-1,3,1,1)`\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\n1. setting up RandomRGBShift augmentation\r\n2. Using RandomRGBShift augmentation with gpus and batchsize > 1\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\n```\r\n File \"/usr/local/lib/python3.8/dist-packages/kornia/augmentation/_2d/intensity/random_rgb_shift.py\", line 100, in apply_transform\r\n return shift_rgb(inp, params['r_shift'], params['g_shift'], params['b_shift'])\r\n File \"/usr/local/lib/python3.8/dist-packages/kornia/enhance/shift_rgb.py\", line 17, in shift_rgb\r\n shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\r\nValueError: only one element tensors can be converted to Python scalars\r\n```\r\n\r\n### Environment\r\n\r\n```shell\r\n- PyTorch Version : 1.10\r\n- Linux\r\n- How you installed PyTorch (`pip`):\r\n- Python version: 3.8\r\n- cuda:11.3.0-cudnn8\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "import torch\n\nfrom kornia.testing import KORNIA_CHECK_IS_COLOR, KORNIA_CHECK_IS_TENSOR\n\n\ndef shift_rgb(image: torch.Tensor, r_shift: torch.Tensor, g_shift: torch.Tensor, b_shift: torch.Tensor) -> torch.Tensor:\n \"\"\"Shift rgb channels.\n\n Shift each image's channel by either r_shift for red, g_shift for green and b_shift for blue channels.\n \"\"\"\n\n KORNIA_CHECK_IS_TENSOR(image)\n KORNIA_CHECK_IS_COLOR(image, f\"with shape {image.shape}\")\n\n shifts = [r_shift, g_shift, b_shift]\n\n shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n\n return shifted\n", "path": "kornia/enhance/shift_rgb.py"}], "after_files": [{"content": "import torch\n\nfrom kornia.testing import KORNIA_CHECK_IS_COLOR, KORNIA_CHECK_IS_TENSOR\n\n\ndef shift_rgb(image: torch.Tensor, r_shift: torch.Tensor, g_shift: torch.Tensor, b_shift: torch.Tensor) -> torch.Tensor:\n \"\"\"Shift rgb channels.\n\n Shift each image's channel by either r_shift for red, g_shift for green and b_shift for blue channels.\n \"\"\"\n\n KORNIA_CHECK_IS_TENSOR(image)\n KORNIA_CHECK_IS_COLOR(image, f\"with shape {image.shape}\")\n\n shifts = [r_shift, g_shift, b_shift]\n\n 
shifted = (image + torch.stack(shifts).view(-1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n\n return shifted\n", "path": "kornia/enhance/shift_rgb.py"}]} | 1,002 | 160 |
gh_patches_debug_14369 | rasdani/github-patches | git_diff | Pycord-Development__pycord-903 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Options with a default value of None still require the "required" kwarg
### Summary
When using `None` as the default value, `required` defaults to True even though there _is_ a default value.
### Reproduction Steps
- Make an application command (slash command) with at least one Option
- Give the option a default value of None, and see that the field is **required** in Discord (shows a red error message when trying to send the command)
- Change the default value to something else, and see that the field is now no longer required in Discord
### Minimal Reproducible Code
```python
import discord
from discord.commands import Option
bot = discord.Bot()
@bot.slash_command()
async def test(ctx, arg: Option(int, default=None)):
pass
bot.run(...)
```
### Expected Results
If any value at all is passed, `required` should be `False`, no matter what that value may be. `None` is a pretty common use case and shouldn't always signify that there is **no** default value.
### Actual Results
If the value passed is `None`, it is treated as if no default value is present.
Error in Discord itself: **This option is required. Specify a value.**
To allow this default value to be used, the Option would have to be specified as
```py
Option(int, default=None, required=False)
```
which feels off: if `default` is present, then `required` shouldn't be.
### Intents
default + members
### System Information
- Python v3.9.5-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b1
- aiohttp v3.7.4.post0
- system info: Linux 4.15.0-58-generic #64-Ubuntu SMP Tue Aug 6 11:12:41 UTC 2019
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
This is caused by https://github.com/Pycord-Development/pycord/blob/68e9cf3db5c311153899a0c577c68d88c0feb4cf/discord/commands/options.py#L88-L91
where it's merely checked to be `None`. A better solution would be to check if it's **in** the kwargs dict, no matter the value (which would allow `None`), and *then* pop it. When popping it, the default value is `None`, making it impossible to differentiate from actually being *passed* or being *absent*.
```py
# using "in" instead of popping the value with a default of None
# which would allow None as a default
self.required: bool = (
kwargs.pop("required", True) if "default" not in kwargs else False
)
# Popping the default value AFTERWARDS so that it doesn't matter if it was already None or not
self.default = kwargs.pop("default", None)
```
If you think this would fix the issue, I'd be more than happy to make the PR for it myself if necessary.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/commands/options.py`
Content:
```
1 """
2 The MIT License (MIT)
3
4 Copyright (c) 2021-present Pycord Development
5
6 Permission is hereby granted, free of charge, to any person obtaining a
7 copy of this software and associated documentation files (the "Software"),
8 to deal in the Software without restriction, including without limitation
9 the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 and/or sell copies of the Software, and to permit persons to whom the
11 Software is furnished to do so, subject to the following conditions:
12
13 The above copyright notice and this permission notice shall be included in
14 all copies or substantial portions of the Software.
15
16 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 DEALINGS IN THE SOFTWARE.
23 """
24
25 from typing import Any, Dict, List, Literal, Optional, Union
26
27 from ..enums import ChannelType, SlashCommandOptionType
28
29 __all__ = (
30 "ThreadOption",
31 "Option",
32 "OptionChoice",
33 "option",
34 )
35
36 channel_type_map = {
37 "TextChannel": ChannelType.text,
38 "VoiceChannel": ChannelType.voice,
39 "StageChannel": ChannelType.stage_voice,
40 "CategoryChannel": ChannelType.category,
41 "Thread": ChannelType.public_thread,
42 }
43
44
45 class ThreadOption:
46 def __init__(self, thread_type: Literal["public", "private", "news"]):
47 type_map = {
48 "public": ChannelType.public_thread,
49 "private": ChannelType.private_thread,
50 "news": ChannelType.news_thread,
51 }
52 self._type = type_map[thread_type]
53
54 @property
55 def __name__(self):
56 return "ThreadOption"
57
58
59 class Option:
60 def __init__(self, input_type: Any, /, description: str = None, **kwargs) -> None:
61 self.name: Optional[str] = kwargs.pop("name", None)
62 self.description = description or "No description provided"
63 self.converter = None
64 self._raw_type = input_type
65 self.channel_types: List[ChannelType] = kwargs.pop(
66 "channel_types", []
67 )
68 if not isinstance(input_type, SlashCommandOptionType):
69 if hasattr(input_type, "convert"):
70 self.converter = input_type
71 input_type = SlashCommandOptionType.string
72 else:
73 _type = SlashCommandOptionType.from_datatype(input_type)
74 if _type == SlashCommandOptionType.channel:
75 if not isinstance(input_type, tuple):
76 input_type = (input_type,)
77 for i in input_type:
78 if i.__name__ == "GuildChannel":
79 continue
80 if isinstance(i, ThreadOption):
81 self.channel_types.append(i._type)
82 continue
83
84 channel_type = channel_type_map[i.__name__]
85 self.channel_types.append(channel_type)
86 input_type = _type
87 self.input_type = input_type
88 self.default = kwargs.pop("default", None)
89 self.required: bool = (
90 kwargs.pop("required", True) if self.default is None else False
91 )
92 self.choices: List[OptionChoice] = [
93 o if isinstance(o, OptionChoice) else OptionChoice(o)
94 for o in kwargs.pop("choices", list())
95 ]
96
97 if self.input_type == SlashCommandOptionType.integer:
98 minmax_types = (int, type(None))
99 elif self.input_type == SlashCommandOptionType.number:
100 minmax_types = (int, float, type(None))
101 else:
102 minmax_types = (type(None),)
103 minmax_typehint = Optional[Union[minmax_types]] # type: ignore
104
105 self.min_value: minmax_typehint = kwargs.pop("min_value", None)
106 self.max_value: minmax_typehint = kwargs.pop("max_value", None)
107
108 if (
109 not isinstance(self.min_value, minmax_types)
110 and self.min_value is not None
111 ):
112 raise TypeError(
113 f'Expected {minmax_typehint} for min_value, got "{type(self.min_value).__name__}"'
114 )
115 if not (isinstance(self.max_value, minmax_types) or self.min_value is None):
116 raise TypeError(
117 f'Expected {minmax_typehint} for max_value, got "{type(self.max_value).__name__}"'
118 )
119
120 self.autocomplete = kwargs.pop("autocomplete", None)
121
122 def to_dict(self) -> Dict:
123 as_dict = {
124 "name": self.name,
125 "description": self.description,
126 "type": self.input_type.value,
127 "required": self.required,
128 "choices": [c.to_dict() for c in self.choices],
129 "autocomplete": bool(self.autocomplete),
130 }
131 if self.channel_types:
132 as_dict["channel_types"] = [t.value for t in self.channel_types]
133 if self.min_value is not None:
134 as_dict["min_value"] = self.min_value
135 if self.max_value is not None:
136 as_dict["max_value"] = self.max_value
137
138 return as_dict
139
140 def __repr__(self):
141 return f"<discord.commands.{self.__class__.__name__} name={self.name}>"
142
143
144 class OptionChoice:
145 def __init__(self, name: str, value: Optional[Union[str, int, float]] = None):
146 self.name = name
147 self.value = value if value is not None else name
148
149 def to_dict(self) -> Dict[str, Union[str, int, float]]:
150 return {"name": self.name, "value": self.value}
151
152
153 def option(name, type=None, **kwargs):
154 """A decorator that can be used instead of typehinting Option"""
155
156 def decorator(func):
157 nonlocal type
158 type = type or func.__annotations__.get(name, str)
159 func.__annotations__[name] = Option(type, **kwargs)
160 return func
161
162 return decorator
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/commands/options.py b/discord/commands/options.py
--- a/discord/commands/options.py
+++ b/discord/commands/options.py
@@ -85,10 +85,10 @@
self.channel_types.append(channel_type)
input_type = _type
self.input_type = input_type
- self.default = kwargs.pop("default", None)
self.required: bool = (
- kwargs.pop("required", True) if self.default is None else False
+ kwargs.pop("required", True) if "default" not in kwargs else False
)
+ self.default = kwargs.pop("default", None)
self.choices: List[OptionChoice] = [
o if isinstance(o, OptionChoice) else OptionChoice(o)
for o in kwargs.pop("choices", list())
| {"golden_diff": "diff --git a/discord/commands/options.py b/discord/commands/options.py\n--- a/discord/commands/options.py\n+++ b/discord/commands/options.py\n@@ -85,10 +85,10 @@\n self.channel_types.append(channel_type)\n input_type = _type\n self.input_type = input_type\n- self.default = kwargs.pop(\"default\", None)\n self.required: bool = (\n- kwargs.pop(\"required\", True) if self.default is None else False\n+ kwargs.pop(\"required\", True) if \"default\" not in kwargs else False\n )\n+ self.default = kwargs.pop(\"default\", None)\n self.choices: List[OptionChoice] = [\n o if isinstance(o, OptionChoice) else OptionChoice(o)\n for o in kwargs.pop(\"choices\", list())\n", "issue": "Options with a default value of None still require the \"required\" kwarg\n### Summary\r\n\r\nWhen using `None` as the default value, `required` defaults to True even though there _is_ a default value.\r\n\r\n### Reproduction Steps\r\n\r\n- Make an application command (slash command) with at least one Option\r\n- Give the option a default value of None, and see that the field is **required** in Discord (shows a red error message when trying to send the command)\r\n- Change the default value to something else, and see that the field is now no longer required in Discord\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\nimport discord\r\nfrom discord.commands import Option\r\n\r\nbot = discord.Bot()\r\n\r\[email protected]_command()\r\nasync def test(ctx, arg: Option(int, default=None)):\r\n pass\r\n\r\nbot.run(...)\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nIf any value at all is passed, `required` should be `False`, no matter what that value may be. `None` is a pretty common use case and shouldn't always signify that there is **no** default value.\r\n\r\n### Actual Results\r\n\r\nIf the value passed is `None`, it is treated as if no default value is present.\r\n\r\nError in Discord itself: **This option is required. Specify a value.**\r\n\r\nTo allow this default value to be used, the Option would have to be specified as\r\n\r\n```py\r\nOption(int, default=None, required=False)\r\n```\r\nwhich feels off, if `default` is present then `required` shouldn't be.\r\n\r\n### Intents\r\n\r\ndefault + members\r\n\r\n### System Information\r\n\r\n- Python v3.9.5-final\r\n- py-cord v2.0.0-beta\r\n - py-cord pkg_resources: v2.0.0b1\r\n- aiohttp v3.7.4.post0\r\n- system info: Linux 4.15.0-58-generic #64-Ubuntu SMP Tue Aug 6 11:12:41 UTC 2019\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\nThis is caused by https://github.com/Pycord-Development/pycord/blob/68e9cf3db5c311153899a0c577c68d88c0feb4cf/discord/commands/options.py#L88-L91\r\n\r\nwhere it's merely checked to be `None`. A better solution would be to check if it's **in** the kwargs dict, no matter the value (which would allow `None`), and *then* pop it. 
When popping it, the default value is `None`, making it impossible to differentiate from actually being *passed* or being *absent*.\r\n\r\n```py\r\n # using \"in\" instead of popping the value with a default of None\r\n # which would allow None as a default\r\n self.required: bool = (\r\n kwargs.pop(\"required\", True) if \"default\" not in kwargs else False\r\n )\r\n # Popping the default value AFTERWARDS so that it doesn't matter if it was already None or not\r\n self.default = kwargs.pop(\"default\", None)\r\n```\r\n\r\nIf you think this would fix the issue, I'd be more than happy to make the PR for it myself if necessary.\nOptions with a default value of None still require the \"required\" kwarg\n### Summary\r\n\r\nWhen using `None` as the default value, `required` defaults to True even though there _is_ a default value.\r\n\r\n### Reproduction Steps\r\n\r\n- Make an application command (slash command) with at least one Option\r\n- Give the option a default value of None, and see that the field is **required** in Discord (shows a red error message when trying to send the command)\r\n- Change the default value to something else, and see that the field is now no longer required in Discord\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\nimport discord\r\nfrom discord.commands import Option\r\n\r\nbot = discord.Bot()\r\n\r\[email protected]_command()\r\nasync def test(ctx, arg: Option(int, default=None)):\r\n pass\r\n\r\nbot.run(...)\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nIf any value at all is passed, `required` should be `False`, no matter what that value may be. `None` is a pretty common use case and shouldn't always signify that there is **no** default value.\r\n\r\n### Actual Results\r\n\r\nIf the value passed is `None`, it is treated as if no default value is present.\r\n\r\nError in Discord itself: **This option is required. Specify a value.**\r\n\r\nTo allow this default value to be used, the Option would have to be specified as\r\n\r\n```py\r\nOption(int, default=None, required=False)\r\n```\r\nwhich feels off, if `default` is present then `required` shouldn't be.\r\n\r\n### Intents\r\n\r\ndefault + members\r\n\r\n### System Information\r\n\r\n- Python v3.9.5-final\r\n- py-cord v2.0.0-beta\r\n - py-cord pkg_resources: v2.0.0b1\r\n- aiohttp v3.7.4.post0\r\n- system info: Linux 4.15.0-58-generic #64-Ubuntu SMP Tue Aug 6 11:12:41 UTC 2019\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\nThis is caused by https://github.com/Pycord-Development/pycord/blob/68e9cf3db5c311153899a0c577c68d88c0feb4cf/discord/commands/options.py#L88-L91\r\n\r\nwhere it's merely checked to be `None`. A better solution would be to check if it's **in** the kwargs dict, no matter the value (which would allow `None`), and *then* pop it. 
When popping it, the default value is `None`, making it impossible to differentiate from actually being *passed* or being *absent*.\r\n\r\n```py\r\n # using \"in\" instead of popping the value with a default of None\r\n # which would allow None as a default\r\n self.required: bool = (\r\n kwargs.pop(\"required\", True) if \"default\" not in kwargs else False\r\n )\r\n # Popping the default value AFTERWARDS so that it doesn't matter if it was already None or not\r\n self.default = kwargs.pop(\"default\", None)\r\n```\r\n\r\nIf you think this would fix the issue, I'd be more than happy to make the PR for it myself if necessary.\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom typing import Any, Dict, List, Literal, Optional, Union\n\nfrom ..enums import ChannelType, SlashCommandOptionType\n\n__all__ = (\n \"ThreadOption\",\n \"Option\",\n \"OptionChoice\",\n \"option\",\n)\n\nchannel_type_map = {\n \"TextChannel\": ChannelType.text,\n \"VoiceChannel\": ChannelType.voice,\n \"StageChannel\": ChannelType.stage_voice,\n \"CategoryChannel\": ChannelType.category,\n \"Thread\": ChannelType.public_thread,\n}\n\n\nclass ThreadOption:\n def __init__(self, thread_type: Literal[\"public\", \"private\", \"news\"]):\n type_map = {\n \"public\": ChannelType.public_thread,\n \"private\": ChannelType.private_thread,\n \"news\": ChannelType.news_thread,\n }\n self._type = type_map[thread_type]\n\n @property\n def __name__(self):\n return \"ThreadOption\"\n\n\nclass Option:\n def __init__(self, input_type: Any, /, description: str = None, **kwargs) -> None:\n self.name: Optional[str] = kwargs.pop(\"name\", None)\n self.description = description or \"No description provided\"\n self.converter = None\n self._raw_type = input_type\n self.channel_types: List[ChannelType] = kwargs.pop(\n \"channel_types\", []\n )\n if not isinstance(input_type, SlashCommandOptionType):\n if hasattr(input_type, \"convert\"):\n self.converter = input_type\n input_type = SlashCommandOptionType.string\n else:\n _type = SlashCommandOptionType.from_datatype(input_type)\n if _type == SlashCommandOptionType.channel:\n if not isinstance(input_type, tuple):\n input_type = (input_type,)\n for i in input_type:\n if i.__name__ == \"GuildChannel\":\n continue\n if isinstance(i, ThreadOption):\n self.channel_types.append(i._type)\n continue\n\n channel_type = channel_type_map[i.__name__]\n self.channel_types.append(channel_type)\n 
input_type = _type\n self.input_type = input_type\n self.default = kwargs.pop(\"default\", None)\n self.required: bool = (\n kwargs.pop(\"required\", True) if self.default is None else False\n )\n self.choices: List[OptionChoice] = [\n o if isinstance(o, OptionChoice) else OptionChoice(o)\n for o in kwargs.pop(\"choices\", list())\n ]\n\n if self.input_type == SlashCommandOptionType.integer:\n minmax_types = (int, type(None))\n elif self.input_type == SlashCommandOptionType.number:\n minmax_types = (int, float, type(None))\n else:\n minmax_types = (type(None),)\n minmax_typehint = Optional[Union[minmax_types]] # type: ignore\n\n self.min_value: minmax_typehint = kwargs.pop(\"min_value\", None)\n self.max_value: minmax_typehint = kwargs.pop(\"max_value\", None)\n\n if (\n not isinstance(self.min_value, minmax_types)\n and self.min_value is not None\n ):\n raise TypeError(\n f'Expected {minmax_typehint} for min_value, got \"{type(self.min_value).__name__}\"'\n )\n if not (isinstance(self.max_value, minmax_types) or self.min_value is None):\n raise TypeError(\n f'Expected {minmax_typehint} for max_value, got \"{type(self.max_value).__name__}\"'\n )\n\n self.autocomplete = kwargs.pop(\"autocomplete\", None)\n\n def to_dict(self) -> Dict:\n as_dict = {\n \"name\": self.name,\n \"description\": self.description,\n \"type\": self.input_type.value,\n \"required\": self.required,\n \"choices\": [c.to_dict() for c in self.choices],\n \"autocomplete\": bool(self.autocomplete),\n }\n if self.channel_types:\n as_dict[\"channel_types\"] = [t.value for t in self.channel_types]\n if self.min_value is not None:\n as_dict[\"min_value\"] = self.min_value\n if self.max_value is not None:\n as_dict[\"max_value\"] = self.max_value\n\n return as_dict\n\n def __repr__(self):\n return f\"<discord.commands.{self.__class__.__name__} name={self.name}>\"\n\n\nclass OptionChoice:\n def __init__(self, name: str, value: Optional[Union[str, int, float]] = None):\n self.name = name\n self.value = value if value is not None else name\n\n def to_dict(self) -> Dict[str, Union[str, int, float]]:\n return {\"name\": self.name, \"value\": self.value}\n\n\ndef option(name, type=None, **kwargs):\n \"\"\"A decorator that can be used instead of typehinting Option\"\"\"\n\n def decorator(func):\n nonlocal type\n type = type or func.__annotations__.get(name, str)\n func.__annotations__[name] = Option(type, **kwargs)\n return func\n\n return decorator\n", "path": "discord/commands/options.py"}], "after_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom typing import Any, Dict, List, Literal, Optional, Union\n\nfrom ..enums import ChannelType, SlashCommandOptionType\n\n__all__ = (\n \"ThreadOption\",\n \"Option\",\n \"OptionChoice\",\n \"option\",\n)\n\nchannel_type_map = {\n \"TextChannel\": ChannelType.text,\n \"VoiceChannel\": ChannelType.voice,\n \"StageChannel\": ChannelType.stage_voice,\n \"CategoryChannel\": ChannelType.category,\n \"Thread\": ChannelType.public_thread,\n}\n\n\nclass ThreadOption:\n def __init__(self, thread_type: Literal[\"public\", \"private\", \"news\"]):\n type_map = {\n \"public\": ChannelType.public_thread,\n \"private\": ChannelType.private_thread,\n \"news\": ChannelType.news_thread,\n }\n self._type = type_map[thread_type]\n\n @property\n def __name__(self):\n return \"ThreadOption\"\n\n\nclass Option:\n def __init__(self, input_type: Any, /, description: str = None, **kwargs) -> None:\n self.name: Optional[str] = kwargs.pop(\"name\", None)\n self.description = description or \"No description provided\"\n self.converter = None\n self._raw_type = input_type\n self.channel_types: List[ChannelType] = kwargs.pop(\n \"channel_types\", []\n )\n if not isinstance(input_type, SlashCommandOptionType):\n if hasattr(input_type, \"convert\"):\n self.converter = input_type\n input_type = SlashCommandOptionType.string\n else:\n _type = SlashCommandOptionType.from_datatype(input_type)\n if _type == SlashCommandOptionType.channel:\n if not isinstance(input_type, tuple):\n input_type = (input_type,)\n for i in input_type:\n if i.__name__ == \"GuildChannel\":\n continue\n if isinstance(i, ThreadOption):\n self.channel_types.append(i._type)\n continue\n\n channel_type = channel_type_map[i.__name__]\n self.channel_types.append(channel_type)\n input_type = _type\n self.input_type = input_type\n self.required: bool = (\n kwargs.pop(\"required\", True) if \"default\" not in kwargs else False\n )\n self.default = kwargs.pop(\"default\", None)\n self.choices: List[OptionChoice] = [\n o if isinstance(o, OptionChoice) else OptionChoice(o)\n for o in kwargs.pop(\"choices\", list())\n ]\n\n if self.input_type == SlashCommandOptionType.integer:\n minmax_types = (int, type(None))\n elif self.input_type == SlashCommandOptionType.number:\n minmax_types = (int, float, type(None))\n else:\n minmax_types = (type(None),)\n minmax_typehint = Optional[Union[minmax_types]] # type: ignore\n\n self.min_value: minmax_typehint = kwargs.pop(\"min_value\", None)\n self.max_value: minmax_typehint = kwargs.pop(\"max_value\", None)\n\n if (\n not isinstance(self.min_value, minmax_types)\n and self.min_value is not None\n ):\n raise TypeError(\n f'Expected {minmax_typehint} for min_value, got \"{type(self.min_value).__name__}\"'\n )\n if not (isinstance(self.max_value, minmax_types) or self.min_value is None):\n raise TypeError(\n f'Expected {minmax_typehint} for max_value, got \"{type(self.max_value).__name__}\"'\n )\n\n self.autocomplete = kwargs.pop(\"autocomplete\", None)\n\n def to_dict(self) -> Dict:\n as_dict = {\n \"name\": self.name,\n \"description\": self.description,\n \"type\": self.input_type.value,\n \"required\": self.required,\n \"choices\": [c.to_dict() for c in self.choices],\n \"autocomplete\": bool(self.autocomplete),\n }\n if self.channel_types:\n 
as_dict[\"channel_types\"] = [t.value for t in self.channel_types]\n if self.min_value is not None:\n as_dict[\"min_value\"] = self.min_value\n if self.max_value is not None:\n as_dict[\"max_value\"] = self.max_value\n\n return as_dict\n\n def __repr__(self):\n return f\"<discord.commands.{self.__class__.__name__} name={self.name}>\"\n\n\nclass OptionChoice:\n def __init__(self, name: str, value: Optional[Union[str, int, float]] = None):\n self.name = name\n self.value = value if value is not None else name\n\n def to_dict(self) -> Dict[str, Union[str, int, float]]:\n return {\"name\": self.name, \"value\": self.value}\n\n\ndef option(name, type=None, **kwargs):\n \"\"\"A decorator that can be used instead of typehinting Option\"\"\"\n\n def decorator(func):\n nonlocal type\n type = type or func.__annotations__.get(name, str)\n func.__annotations__[name] = Option(type, **kwargs)\n return func\n\n return decorator\n", "path": "discord/commands/options.py"}]} | 3,418 | 178 |
gh_patches_debug_38887 | rasdani/github-patches | git_diff | scikit-image__scikit-image-5488 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow use of complex numbers with denoise_nl_means
The code of non_local_means.py converts the dataset to 'float', but I have a dataset with complex numbers. Is there a possibility to support more data types?
Thanks
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/restoration/non_local_means.py`
Content:
```
1 import numpy as np
2 from warnings import warn
3 from .._shared import utils
4 from .._shared.utils import convert_to_float
5 from ._nl_means_denoising import (
6 _nl_means_denoising_2d,
7 _nl_means_denoising_3d,
8 _fast_nl_means_denoising_2d,
9 _fast_nl_means_denoising_3d)
10
11
12 @utils.channel_as_last_axis()
13 @utils.deprecate_multichannel_kwarg(multichannel_position=4)
14 def denoise_nl_means(image, patch_size=7, patch_distance=11, h=0.1,
15 multichannel=False, fast_mode=True, sigma=0., *,
16 preserve_range=None, channel_axis=None):
17 """Perform non-local means denoising on 2-D or 3-D grayscale images, and
18 2-D RGB images.
19
20 Parameters
21 ----------
22 image : 2D or 3D ndarray
23 Input image to be denoised, which can be 2D or 3D, and grayscale
24 or RGB (for 2D images only, see ``multichannel`` parameter).
25 patch_size : int, optional
26 Size of patches used for denoising.
27 patch_distance : int, optional
28 Maximal distance in pixels where to search patches used for denoising.
29 h : float, optional
30 Cut-off distance (in gray levels). The higher h, the more permissive
31 one is in accepting patches. A higher h results in a smoother image,
32 at the expense of blurring features. For a Gaussian noise of standard
33 deviation sigma, a rule of thumb is to choose the value of h to be
34 sigma of slightly less.
35 multichannel : bool, optional
36 Whether the last axis of the image is to be interpreted as multiple
37 channels or another spatial dimension. This argument is deprecated:
38 specify `channel_axis` instead.
39 fast_mode : bool, optional
40 If True (default value), a fast version of the non-local means
41 algorithm is used. If False, the original version of non-local means is
42 used. See the Notes section for more details about the algorithms.
43 sigma : float, optional
44 The standard deviation of the (Gaussian) noise. If provided, a more
45 robust computation of patch weights is computed that takes the expected
46 noise variance into account (see Notes below).
47 preserve_range : bool, optional
48 Whether to keep the original range of values. Otherwise, the input
49 image is converted according to the conventions of `img_as_float`.
50 Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
51 channel_axis : int or None, optional
52 If None, the image is assumed to be a grayscale (single channel) image.
53 Otherwise, this parameter indicates which axis of the array corresponds
54 to channels.
55
56 .. versionadded:: 0.19
57 ``channel_axis`` was added in 0.19.
58
59 Returns
60 -------
61 result : ndarray
62 Denoised image, of same shape as `image`.
63
64 Notes
65 -----
66
67 The non-local means algorithm is well suited for denoising images with
68 specific textures. The principle of the algorithm is to average the value
69 of a given pixel with values of other pixels in a limited neighbourhood,
70 provided that the *patches* centered on the other pixels are similar enough
71 to the patch centered on the pixel of interest.
72
73 In the original version of the algorithm [1]_, corresponding to
74 ``fast=False``, the computational complexity is::
75
76 image.size * patch_size ** image.ndim * patch_distance ** image.ndim
77
78 Hence, changing the size of patches or their maximal distance has a
79 strong effect on computing times, especially for 3-D images.
80
81 However, the default behavior corresponds to ``fast_mode=True``, for which
82 another version of non-local means [2]_ is used, corresponding to a
83 complexity of::
84
85 image.size * patch_distance ** image.ndim
86
87 The computing time depends only weakly on the patch size, thanks to
88 the computation of the integral of patches distances for a given
89 shift, that reduces the number of operations [1]_. Therefore, this
90 algorithm executes faster than the classic algorithm
91 (``fast_mode=False``), at the expense of using twice as much memory.
92 This implementation has been proven to be more efficient compared to
93 other alternatives, see e.g. [3]_.
94
95 Compared to the classic algorithm, all pixels of a patch contribute
96 to the distance to another patch with the same weight, no matter
97 their distance to the center of the patch. This coarser computation
98 of the distance can result in a slightly poorer denoising
99 performance. Moreover, for small images (images with a linear size
100 that is only a few times the patch size), the classic algorithm can
101 be faster due to boundary effects.
102
103 The image is padded using the `reflect` mode of `skimage.util.pad`
104 before denoising.
105
106 If the noise standard deviation, `sigma`, is provided a more robust
107 computation of patch weights is used. Subtracting the known noise variance
108 from the computed patch distances improves the estimates of patch
109 similarity, giving a moderate improvement to denoising performance [4]_.
110 It was also mentioned as an option for the fast variant of the algorithm in
111 [3]_.
112
113 When `sigma` is provided, a smaller `h` should typically be used to
114 avoid oversmoothing. The optimal value for `h` depends on the image
115 content and noise level, but a reasonable starting point is
116 ``h = 0.8 * sigma`` when `fast_mode` is `True`, or ``h = 0.6 * sigma`` when
117 `fast_mode` is `False`.
118
119 References
120 ----------
121 .. [1] A. Buades, B. Coll, & J-M. Morel. A non-local algorithm for image
122 denoising. In CVPR 2005, Vol. 2, pp. 60-65, IEEE.
123 :DOI:`10.1109/CVPR.2005.38`
124
125 .. [2] J. Darbon, A. Cunha, T.F. Chan, S. Osher, and G.J. Jensen, Fast
126 nonlocal filtering applied to electron cryomicroscopy, in 5th IEEE
127 International Symposium on Biomedical Imaging: From Nano to Macro,
128 2008, pp. 1331-1334.
129 :DOI:`10.1109/ISBI.2008.4541250`
130
131 .. [3] Jacques Froment. Parameter-Free Fast Pixelwise Non-Local Means
132 Denoising. Image Processing On Line, 2014, vol. 4, pp. 300-326.
133 :DOI:`10.5201/ipol.2014.120`
134
135 .. [4] A. Buades, B. Coll, & J-M. Morel. Non-Local Means Denoising.
136 Image Processing On Line, 2011, vol. 1, pp. 208-212.
137 :DOI:`10.5201/ipol.2011.bcm_nlm`
138
139 Examples
140 --------
141 >>> a = np.zeros((40, 40))
142 >>> a[10:-10, 10:-10] = 1.
143 >>> rng = np.random.default_rng()
144 >>> a += 0.3 * rng.standard_normal(a.shape)
145 >>> denoised_a = denoise_nl_means(a, 7, 5, 0.1)
146
147 """
148 if image.ndim == 2:
149 image = image[..., np.newaxis]
150 channel_axis = -1
151 if image.ndim != 3:
152 raise NotImplementedError("Non-local means denoising is only \
153 implemented for 2D grayscale and RGB images or 3-D grayscale images.")
154
155 if preserve_range is None and np.issubdtype(image.dtype, np.integer):
156 warn('Image dtype is not float. By default denoise_nl_means will '
157 'assume you want to preserve the range of your image '
158 '(preserve_range=True). In scikit-image 0.19 this behavior will '
159 'change to preserve_range=False. To avoid this warning, '
160 'explicitly specify the preserve_range parameter.',
161 stacklevel=2)
162 preserve_range = True
163
164 image = convert_to_float(image, preserve_range)
165 if not image.flags.c_contiguous:
166 image = np.ascontiguousarray(image)
167
168 kwargs = dict(s=patch_size, d=patch_distance, h=h, var=sigma * sigma)
169 if channel_axis is not None: # 2-D images
170 if fast_mode:
171 return _fast_nl_means_denoising_2d(image, **kwargs)
172 else:
173 return _nl_means_denoising_2d(image, **kwargs)
174 else: # 3-D grayscale
175 if fast_mode:
176 return _fast_nl_means_denoising_3d(image, **kwargs)
177 else:
178 return _nl_means_denoising_3d(image, **kwargs)
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/restoration/non_local_means.py b/skimage/restoration/non_local_means.py
--- a/skimage/restoration/non_local_means.py
+++ b/skimage/restoration/non_local_means.py
@@ -1,12 +1,14 @@
-import numpy as np
from warnings import warn
+
+import numpy as np
+
from .._shared import utils
from .._shared.utils import convert_to_float
-from ._nl_means_denoising import (
- _nl_means_denoising_2d,
- _nl_means_denoising_3d,
- _fast_nl_means_denoising_2d,
- _fast_nl_means_denoising_3d)
+from ._nl_means_denoising import (_nl_means_denoising_2d,
+ _nl_means_denoising_3d,
+ _fast_nl_means_denoising_2d,
+ _fast_nl_means_denoising_3d,
+ _fast_nl_means_denoising_4d)
@utils.channel_as_last_axis()
@@ -143,14 +145,18 @@
>>> rng = np.random.default_rng()
>>> a += 0.3 * rng.standard_normal(a.shape)
>>> denoised_a = denoise_nl_means(a, 7, 5, 0.1)
-
"""
- if image.ndim == 2:
+ if channel_axis is None:
+ multichannel = False
image = image[..., np.newaxis]
- channel_axis = -1
- if image.ndim != 3:
- raise NotImplementedError("Non-local means denoising is only \
- implemented for 2D grayscale and RGB images or 3-D grayscale images.")
+ else:
+ multichannel = True
+
+ ndim_no_channel = image.ndim - 1
+ if (ndim_no_channel < 2) or (ndim_no_channel > 4):
+ raise NotImplementedError(
+ "Non-local means denoising is only implemented for 2D, "
+ "3D or 4D grayscale or multichannel images.")
if preserve_range is None and np.issubdtype(image.dtype, np.integer):
warn('Image dtype is not float. By default denoise_nl_means will '
@@ -166,13 +172,23 @@
image = np.ascontiguousarray(image)
kwargs = dict(s=patch_size, d=patch_distance, h=h, var=sigma * sigma)
- if channel_axis is not None: # 2-D images
+ if ndim_no_channel == 2:
+ nlm_func = (_fast_nl_means_denoising_2d if fast_mode else
+ _nl_means_denoising_2d)
+ elif ndim_no_channel == 3:
+ if multichannel and not fast_mode:
+ raise NotImplementedError(
+ "Multichannel 3D requires fast_mode to be True.")
if fast_mode:
- return _fast_nl_means_denoising_2d(image, **kwargs)
+ nlm_func = _fast_nl_means_denoising_3d
else:
- return _nl_means_denoising_2d(image, **kwargs)
- else: # 3-D grayscale
+ # have to drop the size 1 channel axis for slow mode
+ image = image[..., 0]
+ nlm_func = _nl_means_denoising_3d
+ elif ndim_no_channel == 4:
if fast_mode:
- return _fast_nl_means_denoising_3d(image, **kwargs)
+ nlm_func = _fast_nl_means_denoising_4d
else:
- return _nl_means_denoising_3d(image, **kwargs)
+ raise NotImplementedError("4D requires fast_mode to be True.")
+ dn = np.asarray(nlm_func(image, **kwargs))
+ return dn
| {"golden_diff": "diff --git a/skimage/restoration/non_local_means.py b/skimage/restoration/non_local_means.py\n--- a/skimage/restoration/non_local_means.py\n+++ b/skimage/restoration/non_local_means.py\n@@ -1,12 +1,14 @@\n-import numpy as np\n from warnings import warn\n+\n+import numpy as np\n+\n from .._shared import utils\n from .._shared.utils import convert_to_float\n-from ._nl_means_denoising import (\n- _nl_means_denoising_2d,\n- _nl_means_denoising_3d,\n- _fast_nl_means_denoising_2d,\n- _fast_nl_means_denoising_3d)\n+from ._nl_means_denoising import (_nl_means_denoising_2d,\n+ _nl_means_denoising_3d,\n+ _fast_nl_means_denoising_2d,\n+ _fast_nl_means_denoising_3d,\n+ _fast_nl_means_denoising_4d)\n \n \n @utils.channel_as_last_axis()\n@@ -143,14 +145,18 @@\n >>> rng = np.random.default_rng()\n >>> a += 0.3 * rng.standard_normal(a.shape)\n >>> denoised_a = denoise_nl_means(a, 7, 5, 0.1)\n-\n \"\"\"\n- if image.ndim == 2:\n+ if channel_axis is None:\n+ multichannel = False\n image = image[..., np.newaxis]\n- channel_axis = -1\n- if image.ndim != 3:\n- raise NotImplementedError(\"Non-local means denoising is only \\\n- implemented for 2D grayscale and RGB images or 3-D grayscale images.\")\n+ else:\n+ multichannel = True\n+\n+ ndim_no_channel = image.ndim - 1\n+ if (ndim_no_channel < 2) or (ndim_no_channel > 4):\n+ raise NotImplementedError(\n+ \"Non-local means denoising is only implemented for 2D, \"\n+ \"3D or 4D grayscale or multichannel images.\")\n \n if preserve_range is None and np.issubdtype(image.dtype, np.integer):\n warn('Image dtype is not float. By default denoise_nl_means will '\n@@ -166,13 +172,23 @@\n image = np.ascontiguousarray(image)\n \n kwargs = dict(s=patch_size, d=patch_distance, h=h, var=sigma * sigma)\n- if channel_axis is not None: # 2-D images\n+ if ndim_no_channel == 2:\n+ nlm_func = (_fast_nl_means_denoising_2d if fast_mode else\n+ _nl_means_denoising_2d)\n+ elif ndim_no_channel == 3:\n+ if multichannel and not fast_mode:\n+ raise NotImplementedError(\n+ \"Multichannel 3D requires fast_mode to be True.\")\n if fast_mode:\n- return _fast_nl_means_denoising_2d(image, **kwargs)\n+ nlm_func = _fast_nl_means_denoising_3d\n else:\n- return _nl_means_denoising_2d(image, **kwargs)\n- else: # 3-D grayscale\n+ # have to drop the size 1 channel axis for slow mode\n+ image = image[..., 0]\n+ nlm_func = _nl_means_denoising_3d\n+ elif ndim_no_channel == 4:\n if fast_mode:\n- return _fast_nl_means_denoising_3d(image, **kwargs)\n+ nlm_func = _fast_nl_means_denoising_4d\n else:\n- return _nl_means_denoising_3d(image, **kwargs)\n+ raise NotImplementedError(\"4D requires fast_mode to be True.\")\n+ dn = np.asarray(nlm_func(image, **kwargs))\n+ return dn\n", "issue": "Allow use of complex numbers with denoise_nl_means\nThe code of non_local_means.py converts the dataset to 'float', but I have a dataset with complex numbers. Is there a possibility to support more data types? 
\r\n\r\nThanks\r\n```\r\n\n", "before_files": [{"content": "import numpy as np\nfrom warnings import warn\nfrom .._shared import utils\nfrom .._shared.utils import convert_to_float\nfrom ._nl_means_denoising import (\n _nl_means_denoising_2d,\n _nl_means_denoising_3d,\n _fast_nl_means_denoising_2d,\n _fast_nl_means_denoising_3d)\n\n\[email protected]_as_last_axis()\[email protected]_multichannel_kwarg(multichannel_position=4)\ndef denoise_nl_means(image, patch_size=7, patch_distance=11, h=0.1,\n multichannel=False, fast_mode=True, sigma=0., *,\n preserve_range=None, channel_axis=None):\n \"\"\"Perform non-local means denoising on 2-D or 3-D grayscale images, and\n 2-D RGB images.\n\n Parameters\n ----------\n image : 2D or 3D ndarray\n Input image to be denoised, which can be 2D or 3D, and grayscale\n or RGB (for 2D images only, see ``multichannel`` parameter).\n patch_size : int, optional\n Size of patches used for denoising.\n patch_distance : int, optional\n Maximal distance in pixels where to search patches used for denoising.\n h : float, optional\n Cut-off distance (in gray levels). The higher h, the more permissive\n one is in accepting patches. A higher h results in a smoother image,\n at the expense of blurring features. For a Gaussian noise of standard\n deviation sigma, a rule of thumb is to choose the value of h to be\n sigma of slightly less.\n multichannel : bool, optional\n Whether the last axis of the image is to be interpreted as multiple\n channels or another spatial dimension. This argument is deprecated:\n specify `channel_axis` instead.\n fast_mode : bool, optional\n If True (default value), a fast version of the non-local means\n algorithm is used. If False, the original version of non-local means is\n used. See the Notes section for more details about the algorithms.\n sigma : float, optional\n The standard deviation of the (Gaussian) noise. If provided, a more\n robust computation of patch weights is computed that takes the expected\n noise variance into account (see Notes below).\n preserve_range : bool, optional\n Whether to keep the original range of values. Otherwise, the input\n image is converted according to the conventions of `img_as_float`.\n Also see https://scikit-image.org/docs/dev/user_guide/data_types.html\n channel_axis : int or None, optional\n If None, the image is assumed to be a grayscale (single channel) image.\n Otherwise, this parameter indicates which axis of the array corresponds\n to channels.\n\n .. versionadded:: 0.19\n ``channel_axis`` was added in 0.19.\n\n Returns\n -------\n result : ndarray\n Denoised image, of same shape as `image`.\n\n Notes\n -----\n\n The non-local means algorithm is well suited for denoising images with\n specific textures. 
The principle of the algorithm is to average the value\n of a given pixel with values of other pixels in a limited neighbourhood,\n provided that the *patches* centered on the other pixels are similar enough\n to the patch centered on the pixel of interest.\n\n In the original version of the algorithm [1]_, corresponding to\n ``fast=False``, the computational complexity is::\n\n image.size * patch_size ** image.ndim * patch_distance ** image.ndim\n\n Hence, changing the size of patches or their maximal distance has a\n strong effect on computing times, especially for 3-D images.\n\n However, the default behavior corresponds to ``fast_mode=True``, for which\n another version of non-local means [2]_ is used, corresponding to a\n complexity of::\n\n image.size * patch_distance ** image.ndim\n\n The computing time depends only weakly on the patch size, thanks to\n the computation of the integral of patches distances for a given\n shift, that reduces the number of operations [1]_. Therefore, this\n algorithm executes faster than the classic algorithm\n (``fast_mode=False``), at the expense of using twice as much memory.\n This implementation has been proven to be more efficient compared to\n other alternatives, see e.g. [3]_.\n\n Compared to the classic algorithm, all pixels of a patch contribute\n to the distance to another patch with the same weight, no matter\n their distance to the center of the patch. This coarser computation\n of the distance can result in a slightly poorer denoising\n performance. Moreover, for small images (images with a linear size\n that is only a few times the patch size), the classic algorithm can\n be faster due to boundary effects.\n\n The image is padded using the `reflect` mode of `skimage.util.pad`\n before denoising.\n\n If the noise standard deviation, `sigma`, is provided a more robust\n computation of patch weights is used. Subtracting the known noise variance\n from the computed patch distances improves the estimates of patch\n similarity, giving a moderate improvement to denoising performance [4]_.\n It was also mentioned as an option for the fast variant of the algorithm in\n [3]_.\n\n When `sigma` is provided, a smaller `h` should typically be used to\n avoid oversmoothing. The optimal value for `h` depends on the image\n content and noise level, but a reasonable starting point is\n ``h = 0.8 * sigma`` when `fast_mode` is `True`, or ``h = 0.6 * sigma`` when\n `fast_mode` is `False`.\n\n References\n ----------\n .. [1] A. Buades, B. Coll, & J-M. Morel. A non-local algorithm for image\n denoising. In CVPR 2005, Vol. 2, pp. 60-65, IEEE.\n :DOI:`10.1109/CVPR.2005.38`\n\n .. [2] J. Darbon, A. Cunha, T.F. Chan, S. Osher, and G.J. Jensen, Fast\n nonlocal filtering applied to electron cryomicroscopy, in 5th IEEE\n International Symposium on Biomedical Imaging: From Nano to Macro,\n 2008, pp. 1331-1334.\n :DOI:`10.1109/ISBI.2008.4541250`\n\n .. [3] Jacques Froment. Parameter-Free Fast Pixelwise Non-Local Means\n Denoising. Image Processing On Line, 2014, vol. 4, pp. 300-326.\n :DOI:`10.5201/ipol.2014.120`\n\n .. [4] A. Buades, B. Coll, & J-M. Morel. Non-Local Means Denoising.\n Image Processing On Line, 2011, vol. 1, pp. 
208-212.\n :DOI:`10.5201/ipol.2011.bcm_nlm`\n\n Examples\n --------\n >>> a = np.zeros((40, 40))\n >>> a[10:-10, 10:-10] = 1.\n >>> rng = np.random.default_rng()\n >>> a += 0.3 * rng.standard_normal(a.shape)\n >>> denoised_a = denoise_nl_means(a, 7, 5, 0.1)\n\n \"\"\"\n if image.ndim == 2:\n image = image[..., np.newaxis]\n channel_axis = -1\n if image.ndim != 3:\n raise NotImplementedError(\"Non-local means denoising is only \\\n implemented for 2D grayscale and RGB images or 3-D grayscale images.\")\n\n if preserve_range is None and np.issubdtype(image.dtype, np.integer):\n warn('Image dtype is not float. By default denoise_nl_means will '\n 'assume you want to preserve the range of your image '\n '(preserve_range=True). In scikit-image 0.19 this behavior will '\n 'change to preserve_range=False. To avoid this warning, '\n 'explicitly specify the preserve_range parameter.',\n stacklevel=2)\n preserve_range = True\n\n image = convert_to_float(image, preserve_range)\n if not image.flags.c_contiguous:\n image = np.ascontiguousarray(image)\n\n kwargs = dict(s=patch_size, d=patch_distance, h=h, var=sigma * sigma)\n if channel_axis is not None: # 2-D images\n if fast_mode:\n return _fast_nl_means_denoising_2d(image, **kwargs)\n else:\n return _nl_means_denoising_2d(image, **kwargs)\n else: # 3-D grayscale\n if fast_mode:\n return _fast_nl_means_denoising_3d(image, **kwargs)\n else:\n return _nl_means_denoising_3d(image, **kwargs)\n", "path": "skimage/restoration/non_local_means.py"}], "after_files": [{"content": "from warnings import warn\n\nimport numpy as np\n\nfrom .._shared import utils\nfrom .._shared.utils import convert_to_float\nfrom ._nl_means_denoising import (_nl_means_denoising_2d,\n _nl_means_denoising_3d,\n _fast_nl_means_denoising_2d,\n _fast_nl_means_denoising_3d,\n _fast_nl_means_denoising_4d)\n\n\[email protected]_as_last_axis()\[email protected]_multichannel_kwarg(multichannel_position=4)\ndef denoise_nl_means(image, patch_size=7, patch_distance=11, h=0.1,\n multichannel=False, fast_mode=True, sigma=0., *,\n preserve_range=None, channel_axis=None):\n \"\"\"Perform non-local means denoising on 2-D or 3-D grayscale images, and\n 2-D RGB images.\n\n Parameters\n ----------\n image : 2D or 3D ndarray\n Input image to be denoised, which can be 2D or 3D, and grayscale\n or RGB (for 2D images only, see ``multichannel`` parameter).\n patch_size : int, optional\n Size of patches used for denoising.\n patch_distance : int, optional\n Maximal distance in pixels where to search patches used for denoising.\n h : float, optional\n Cut-off distance (in gray levels). The higher h, the more permissive\n one is in accepting patches. A higher h results in a smoother image,\n at the expense of blurring features. For a Gaussian noise of standard\n deviation sigma, a rule of thumb is to choose the value of h to be\n sigma of slightly less.\n multichannel : bool, optional\n Whether the last axis of the image is to be interpreted as multiple\n channels or another spatial dimension. This argument is deprecated:\n specify `channel_axis` instead.\n fast_mode : bool, optional\n If True (default value), a fast version of the non-local means\n algorithm is used. If False, the original version of non-local means is\n used. See the Notes section for more details about the algorithms.\n sigma : float, optional\n The standard deviation of the (Gaussian) noise. 
If provided, a more\n robust computation of patch weights is computed that takes the expected\n noise variance into account (see Notes below).\n preserve_range : bool, optional\n Whether to keep the original range of values. Otherwise, the input\n image is converted according to the conventions of `img_as_float`.\n Also see https://scikit-image.org/docs/dev/user_guide/data_types.html\n channel_axis : int or None, optional\n If None, the image is assumed to be a grayscale (single channel) image.\n Otherwise, this parameter indicates which axis of the array corresponds\n to channels.\n\n .. versionadded:: 0.19\n ``channel_axis`` was added in 0.19.\n\n Returns\n -------\n result : ndarray\n Denoised image, of same shape as `image`.\n\n Notes\n -----\n\n The non-local means algorithm is well suited for denoising images with\n specific textures. The principle of the algorithm is to average the value\n of a given pixel with values of other pixels in a limited neighbourhood,\n provided that the *patches* centered on the other pixels are similar enough\n to the patch centered on the pixel of interest.\n\n In the original version of the algorithm [1]_, corresponding to\n ``fast=False``, the computational complexity is::\n\n image.size * patch_size ** image.ndim * patch_distance ** image.ndim\n\n Hence, changing the size of patches or their maximal distance has a\n strong effect on computing times, especially for 3-D images.\n\n However, the default behavior corresponds to ``fast_mode=True``, for which\n another version of non-local means [2]_ is used, corresponding to a\n complexity of::\n\n image.size * patch_distance ** image.ndim\n\n The computing time depends only weakly on the patch size, thanks to\n the computation of the integral of patches distances for a given\n shift, that reduces the number of operations [1]_. Therefore, this\n algorithm executes faster than the classic algorithm\n (``fast_mode=False``), at the expense of using twice as much memory.\n This implementation has been proven to be more efficient compared to\n other alternatives, see e.g. [3]_.\n\n Compared to the classic algorithm, all pixels of a patch contribute\n to the distance to another patch with the same weight, no matter\n their distance to the center of the patch. This coarser computation\n of the distance can result in a slightly poorer denoising\n performance. Moreover, for small images (images with a linear size\n that is only a few times the patch size), the classic algorithm can\n be faster due to boundary effects.\n\n The image is padded using the `reflect` mode of `skimage.util.pad`\n before denoising.\n\n If the noise standard deviation, `sigma`, is provided a more robust\n computation of patch weights is used. Subtracting the known noise variance\n from the computed patch distances improves the estimates of patch\n similarity, giving a moderate improvement to denoising performance [4]_.\n It was also mentioned as an option for the fast variant of the algorithm in\n [3]_.\n\n When `sigma` is provided, a smaller `h` should typically be used to\n avoid oversmoothing. The optimal value for `h` depends on the image\n content and noise level, but a reasonable starting point is\n ``h = 0.8 * sigma`` when `fast_mode` is `True`, or ``h = 0.6 * sigma`` when\n `fast_mode` is `False`.\n\n References\n ----------\n .. [1] A. Buades, B. Coll, & J-M. Morel. A non-local algorithm for image\n denoising. In CVPR 2005, Vol. 2, pp. 60-65, IEEE.\n :DOI:`10.1109/CVPR.2005.38`\n\n .. [2] J. Darbon, A. Cunha, T.F. Chan, S. 
Osher, and G.J. Jensen, Fast\n nonlocal filtering applied to electron cryomicroscopy, in 5th IEEE\n International Symposium on Biomedical Imaging: From Nano to Macro,\n 2008, pp. 1331-1334.\n :DOI:`10.1109/ISBI.2008.4541250`\n\n .. [3] Jacques Froment. Parameter-Free Fast Pixelwise Non-Local Means\n Denoising. Image Processing On Line, 2014, vol. 4, pp. 300-326.\n :DOI:`10.5201/ipol.2014.120`\n\n .. [4] A. Buades, B. Coll, & J-M. Morel. Non-Local Means Denoising.\n Image Processing On Line, 2011, vol. 1, pp. 208-212.\n :DOI:`10.5201/ipol.2011.bcm_nlm`\n\n Examples\n --------\n >>> a = np.zeros((40, 40))\n >>> a[10:-10, 10:-10] = 1.\n >>> rng = np.random.default_rng()\n >>> a += 0.3 * rng.standard_normal(a.shape)\n >>> denoised_a = denoise_nl_means(a, 7, 5, 0.1)\n \"\"\"\n if channel_axis is None:\n multichannel = False\n image = image[..., np.newaxis]\n else:\n multichannel = True\n\n ndim_no_channel = image.ndim - 1\n if (ndim_no_channel < 2) or (ndim_no_channel > 4):\n raise NotImplementedError(\n \"Non-local means denoising is only implemented for 2D, \"\n \"3D or 4D grayscale or multichannel images.\")\n\n if preserve_range is None and np.issubdtype(image.dtype, np.integer):\n warn('Image dtype is not float. By default denoise_nl_means will '\n 'assume you want to preserve the range of your image '\n '(preserve_range=True). In scikit-image 0.19 this behavior will '\n 'change to preserve_range=False. To avoid this warning, '\n 'explicitly specify the preserve_range parameter.',\n stacklevel=2)\n preserve_range = True\n\n image = convert_to_float(image, preserve_range)\n if not image.flags.c_contiguous:\n image = np.ascontiguousarray(image)\n\n kwargs = dict(s=patch_size, d=patch_distance, h=h, var=sigma * sigma)\n if ndim_no_channel == 2:\n nlm_func = (_fast_nl_means_denoising_2d if fast_mode else\n _nl_means_denoising_2d)\n elif ndim_no_channel == 3:\n if multichannel and not fast_mode:\n raise NotImplementedError(\n \"Multichannel 3D requires fast_mode to be True.\")\n if fast_mode:\n nlm_func = _fast_nl_means_denoising_3d\n else:\n # have to drop the size 1 channel axis for slow mode\n image = image[..., 0]\n nlm_func = _nl_means_denoising_3d\n elif ndim_no_channel == 4:\n if fast_mode:\n nlm_func = _fast_nl_means_denoising_4d\n else:\n raise NotImplementedError(\"4D requires fast_mode to be True.\")\n dn = np.asarray(nlm_func(image, **kwargs))\n return dn\n", "path": "skimage/restoration/non_local_means.py"}]} | 2,789 | 873 |
gh_patches_debug_9222 | rasdani/github-patches | git_diff | blakeblackshear__frigate-6497 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Support]: PTZ only with zoom
### Describe the problem you are having
I have an ONVIF camera that only supports the ZOOM action. When Frigate tries to fetch presets, I get the following error: "Optional Action Not Implemented"
### Version
0.13 dev
### Frigate config file
```yaml
cameras:
back:
ffmpeg:
...
onvif:
host: 10.0.10.10
port: 8000
user: admin
password: password
```
### Relevant log output
```shell
<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope" xmlns:SOAP-ENC="http://www.w3.org/2003/05/soap-encoding" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xop="http://www.w3.org/2004/08/xop/include" xmlns:xmime4="http://www.w3.org/2004/11/xmlmime" xmlns:wsa5="http://www.w3.org/2005/08/addressing" xmlns:wsrf-bf="http://docs.oasis-open.org/wsrf/bf-2" xmlns:wstop="http://docs.oasis-open.org/wsn/t-1" xmlns:wsrf-r="http://docs.oasis-open.org/wsrf/r-2" xmlns:tes-e="http://www.onvif.org/ver10/events/wsdl/EventBinding" xmlns:tev="http://www.onvif.org/ver10/events/wsdl" xmlns:tes-nc="http://www.onvif.org/ver10/events/wsdl/NotificationConsumerBinding" xmlns:tes-np="http://www.onvif.org/ver10/events/wsdl/NotificationProducerBinding" xmlns:tes-sm="http://www.onvif.org/ver10/events/wsdl/SubscriptionManagerBinding" xmlns:tns1="http://www.onvif.org/ver10/topics" xmlns:xmime="http://www.w3.org/2004/06/xmlmime" xmlns:tt="http://www.onvif.org/ver10/schema" xmlns:wsnt="http://docs.oasis-open.org/wsn/b-2" xmlns:tds="http://www.onvif.org/ver10/device/wsdl" xmlns:timg="http://www.onvif.org/ver20/imaging/wsdl" xmlns:tmd="http://www.onvif.org/ver10/deviceIO/wsdl" xmlns:tptz="http://www.onvif.org/ver20/ptz/wsdl" xmlns:trt="http://www.onvif.org/ver10/media/wsdl" xmlns:tr2="http://www.onvif.org/ver20/media/wsdl" xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" xmlns:ter="http://www.onvif.org/ver10/error" xmlns:tan="http://www.onvif.org/ver20/analytics/wsdl" xmlns:tan-ae="http://www.onvif.org/ver20/analytics/wsdl/AnalyticsEngineBinding" xmlns:tan-re="http://www.onvif.org/ver20/analytics/wsdl/RuleEngineBinding" xmlns:trc="http://www.onvif.org/ver10/recording/wsdl" xmlns:trp="http://www.onvif.org/ver10/replay/wsdl" xmlns:tse="http://www.onvif.org/ver10/search/wsdl" xmlns:tpl="http://www.onvif.org/ver10/plus/wsdl" xmlns:tplt="http://www.onvif.org/ver10/plus/schema"><SOAP-ENV:Header></SOAP-ENV:Header><SOAP-ENV:Body><SOAP-ENV:Fault><SOAP-ENV:Code><SOAP-ENV:Value>SOAP-ENV:Receiver</SOAP-ENV:Value><SOAP-ENV:Subcode><SOAP-ENV:Value>ter:ActionNotSupported</SOAP-ENV:Value></SOAP-ENV:Subcode></SOAP-ENV:Code><SOAP-ENV:Reason><SOAP-ENV:Text xml:lang="en">Optional Action Not Implemented</SOAP-ENV:Text></SOAP-ENV:Reason><SOAP-ENV:Detail></SOAP-ENV:Detail></SOAP-ENV:Fault></SOAP-ENV:Body></SOAP-ENV:Envelope>
[2023-05-15 03:46:58] frigate.ptz ERROR : Unable to get presets from camera: front: Unknown error: Optional Action Not Implemented
```
### FFprobe output from your camera
```shell
no output
```
### Frigate stats
_No response_
### Operating system
Other
### Install method
Docker CLI
### Coral version
USB
### Network connection
Wired
### Camera make and model
BCS-P-265R3WSM-G
### Any other information that may be helpful
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frigate/ptz.py`
Content:
```
1 """Configure and control camera via onvif."""
2
3 import logging
4 import site
5
6 from enum import Enum
7 from onvif import ONVIFCamera, ONVIFError
8
9 from frigate.config import FrigateConfig
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class OnvifCommandEnum(str, Enum):
16 """Holds all possible move commands"""
17
18 init = "init"
19 move_down = "move_down"
20 move_left = "move_left"
21 move_right = "move_right"
22 move_up = "move_up"
23 preset = "preset"
24 stop = "stop"
25 zoom_in = "zoom_in"
26 zoom_out = "zoom_out"
27
28
29 class OnvifController:
30 def __init__(self, config: FrigateConfig) -> None:
31 self.cams: dict[str, ONVIFCamera] = {}
32
33 for cam_name, cam in config.cameras.items():
34 if not cam.enabled:
35 continue
36
37 if cam.onvif.host:
38 try:
39 self.cams[cam_name] = {
40 "onvif": ONVIFCamera(
41 cam.onvif.host,
42 cam.onvif.port,
43 cam.onvif.user,
44 cam.onvif.password,
45 wsdl_dir=site.getsitepackages()[0].replace(
46 "dist-packages", "site-packages"
47 )
48 + "/wsdl",
49 ),
50 "init": False,
51 "active": False,
52 "presets": {},
53 }
54 except ONVIFError as e:
55 logger.error(f"Onvif connection to {cam.name} failed: {e}")
56
57 def _init_onvif(self, camera_name: str) -> bool:
58 onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
59
60 # create init services
61 media = onvif.create_media_service()
62
63 try:
64 profile = media.GetProfiles()[0]
65 except ONVIFError as e:
66 logger.error(f"Unable to connect to camera: {camera_name}: {e}")
67 return False
68
69 ptz = onvif.create_ptz_service()
70 request = ptz.create_type("GetConfigurationOptions")
71 request.ConfigurationToken = profile.PTZConfiguration.token
72
73 # setup moving request
74 move_request = ptz.create_type("ContinuousMove")
75 move_request.ProfileToken = profile.token
76 self.cams[camera_name]["move_request"] = move_request
77
78 # setup existing presets
79 try:
80 presets: list[dict] = ptz.GetPresets({"ProfileToken": profile.token})
81 except ONVIFError as e:
82 logger.error(f"Unable to get presets from camera: {camera_name}: {e}")
83 return False
84
85 for preset in presets:
86 self.cams[camera_name]["presets"][preset["Name"].lower()] = preset["token"]
87
88 # get list of supported features
89 ptz_config = ptz.GetConfigurationOptions(request)
90 supported_features = []
91
92 if ptz_config.Spaces and ptz_config.Spaces.ContinuousPanTiltVelocitySpace:
93 supported_features.append("pt")
94
95 if ptz_config.Spaces and ptz_config.Spaces.ContinuousZoomVelocitySpace:
96 supported_features.append("zoom")
97
98 self.cams[camera_name]["features"] = supported_features
99
100 self.cams[camera_name]["init"] = True
101 return True
102
103 def _stop(self, camera_name: str) -> None:
104 onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
105 move_request = self.cams[camera_name]["move_request"]
106 onvif.get_service("ptz").Stop(
107 {
108 "ProfileToken": move_request.ProfileToken,
109 "PanTilt": True,
110 "Zoom": True,
111 }
112 )
113 self.cams[camera_name]["active"] = False
114
115 def _move(self, camera_name: str, command: OnvifCommandEnum) -> None:
116 if self.cams[camera_name]["active"]:
117 logger.warning(
118 f"{camera_name} is already performing an action, stopping..."
119 )
120 self._stop(camera_name)
121
122 self.cams[camera_name]["active"] = True
123 onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
124 move_request = self.cams[camera_name]["move_request"]
125
126 if command == OnvifCommandEnum.move_left:
127 move_request.Velocity = {"PanTilt": {"x": -0.5, "y": 0}}
128 elif command == OnvifCommandEnum.move_right:
129 move_request.Velocity = {"PanTilt": {"x": 0.5, "y": 0}}
130 elif command == OnvifCommandEnum.move_up:
131 move_request.Velocity = {
132 "PanTilt": {
133 "x": 0,
134 "y": 0.5,
135 }
136 }
137 elif command == OnvifCommandEnum.move_down:
138 move_request.Velocity = {
139 "PanTilt": {
140 "x": 0,
141 "y": -0.5,
142 }
143 }
144
145 onvif.get_service("ptz").ContinuousMove(move_request)
146
147 def _move_to_preset(self, camera_name: str, preset: str) -> None:
148 if not preset in self.cams[camera_name]["presets"]:
149 logger.error(f"{preset} is not a valid preset for {camera_name}")
150 return
151
152 self.cams[camera_name]["active"] = True
153 move_request = self.cams[camera_name]["move_request"]
154 onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
155 preset_token = self.cams[camera_name]["presets"][preset]
156 onvif.get_service("ptz").GotoPreset(
157 {
158 "ProfileToken": move_request.ProfileToken,
159 "PresetToken": preset_token,
160 }
161 )
162 self.cams[camera_name]["active"] = False
163
164 def _zoom(self, camera_name: str, command: OnvifCommandEnum) -> None:
165 if self.cams[camera_name]["active"]:
166 logger.warning(
167 f"{camera_name} is already performing an action, stopping..."
168 )
169 self._stop(camera_name)
170
171 self.cams[camera_name]["active"] = True
172 onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
173 move_request = self.cams[camera_name]["move_request"]
174
175 if command == OnvifCommandEnum.zoom_in:
176 move_request.Velocity = {"Zoom": {"x": 0.5}}
177 elif command == OnvifCommandEnum.zoom_out:
178 move_request.Velocity = {"Zoom": {"x": -0.5}}
179
180 onvif.get_service("ptz").ContinuousMove(move_request)
181
182 def handle_command(
183 self, camera_name: str, command: OnvifCommandEnum, param: str = ""
184 ) -> None:
185 if camera_name not in self.cams.keys():
186 logger.error(f"Onvif is not setup for {camera_name}")
187 return
188
189 if not self.cams[camera_name]["init"]:
190 if not self._init_onvif(camera_name):
191 return
192
193 if command == OnvifCommandEnum.init:
194 # already init
195 return
196 elif command == OnvifCommandEnum.stop:
197 self._stop(camera_name)
198 elif command == OnvifCommandEnum.preset:
199 self._move_to_preset(camera_name, param)
200 elif (
201 command == OnvifCommandEnum.zoom_in or command == OnvifCommandEnum.zoom_out
202 ):
203 self._zoom(camera_name, command)
204 else:
205 self._move(camera_name, command)
206
207 def get_camera_info(self, camera_name: str) -> dict[str, any]:
208 if camera_name not in self.cams.keys():
209 logger.error(f"Onvif is not setup for {camera_name}")
210 return {}
211
212 if not self.cams[camera_name]["init"]:
213 self._init_onvif(camera_name)
214
215 return {
216 "name": camera_name,
217 "features": self.cams[camera_name]["features"],
218 "presets": list(self.cams[camera_name]["presets"].keys()),
219 }
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frigate/ptz.py b/frigate/ptz.py
--- a/frigate/ptz.py
+++ b/frigate/ptz.py
@@ -79,8 +79,8 @@
try:
presets: list[dict] = ptz.GetPresets({"ProfileToken": profile.token})
except ONVIFError as e:
- logger.error(f"Unable to get presets from camera: {camera_name}: {e}")
- return False
+ logger.warning(f"Unable to get presets from camera: {camera_name}: {e}")
+ presets = []
for preset in presets:
self.cams[camera_name]["presets"][preset["Name"].lower()] = preset["token"]
| {"golden_diff": "diff --git a/frigate/ptz.py b/frigate/ptz.py\n--- a/frigate/ptz.py\n+++ b/frigate/ptz.py\n@@ -79,8 +79,8 @@\n try:\n presets: list[dict] = ptz.GetPresets({\"ProfileToken\": profile.token})\n except ONVIFError as e:\n- logger.error(f\"Unable to get presets from camera: {camera_name}: {e}\")\n- return False\n+ logger.warning(f\"Unable to get presets from camera: {camera_name}: {e}\")\n+ presets = []\n \n for preset in presets:\n self.cams[camera_name][\"presets\"][preset[\"Name\"].lower()] = preset[\"token\"]\n", "issue": "[Support]: PTZ only with zoom\n### Describe the problem you are having\n\nI have ONFIV camera only support ZOOM action. When frigate try fetch presets Igot following error \"Optional Action Not Implemented\"\r\n\n\n### Version\n\n0.13 dev\n\n### Frigate config file\n\n```yaml\ncameras:\r\n back:\r\n ffmpeg:\r\n ...\r\n onvif:\r\n host: 10.0.10.10\r\n port: 8000\r\n user: admin\r\n password: password\n```\n\n\n### Relevant log output\n\n```shell\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://www.w3.org/2003/05/soap-envelope\" xmlns:SOAP-ENC=\"http://www.w3.org/2003/05/soap-encoding\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xop=\"http://www.w3.org/2004/08/xop/include\" xmlns:xmime4=\"http://www.w3.org/2004/11/xmlmime\" xmlns:wsa5=\"http://www.w3.org/2005/08/addressing\" xmlns:wsrf-bf=\"http://docs.oasis-open.org/wsrf/bf-2\" xmlns:wstop=\"http://docs.oasis-open.org/wsn/t-1\" xmlns:wsrf-r=\"http://docs.oasis-open.org/wsrf/r-2\" xmlns:tes-e=\"http://www.onvif.org/ver10/events/wsdl/EventBinding\" xmlns:tev=\"http://www.onvif.org/ver10/events/wsdl\" xmlns:tes-nc=\"http://www.onvif.org/ver10/events/wsdl/NotificationConsumerBinding\" xmlns:tes-np=\"http://www.onvif.org/ver10/events/wsdl/NotificationProducerBinding\" xmlns:tes-sm=\"http://www.onvif.org/ver10/events/wsdl/SubscriptionManagerBinding\" xmlns:tns1=\"http://www.onvif.org/ver10/topics\" xmlns:xmime=\"http://www.w3.org/2004/06/xmlmime\" xmlns:tt=\"http://www.onvif.org/ver10/schema\" xmlns:wsnt=\"http://docs.oasis-open.org/wsn/b-2\" xmlns:tds=\"http://www.onvif.org/ver10/device/wsdl\" xmlns:timg=\"http://www.onvif.org/ver20/imaging/wsdl\" xmlns:tmd=\"http://www.onvif.org/ver10/deviceIO/wsdl\" xmlns:tptz=\"http://www.onvif.org/ver20/ptz/wsdl\" xmlns:trt=\"http://www.onvif.org/ver10/media/wsdl\" xmlns:tr2=\"http://www.onvif.org/ver20/media/wsdl\" xmlns:wsse=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd\" xmlns:wsu=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd\" xmlns:ter=\"http://www.onvif.org/ver10/error\" xmlns:tan=\"http://www.onvif.org/ver20/analytics/wsdl\" xmlns:tan-ae=\"http://www.onvif.org/ver20/analytics/wsdl/AnalyticsEngineBinding\" xmlns:tan-re=\"http://www.onvif.org/ver20/analytics/wsdl/RuleEngineBinding\" xmlns:trc=\"http://www.onvif.org/ver10/recording/wsdl\" xmlns:trp=\"http://www.onvif.org/ver10/replay/wsdl\" xmlns:tse=\"http://www.onvif.org/ver10/search/wsdl\" xmlns:tpl=\"http://www.onvif.org/ver10/plus/wsdl\" xmlns:tplt=\"http://www.onvif.org/ver10/plus/schema\"><SOAP-ENV:Header></SOAP-ENV:Header><SOAP-ENV:Body><SOAP-ENV:Fault><SOAP-ENV:Code><SOAP-ENV:Value>SOAP-ENV:Receiver</SOAP-ENV:Value><SOAP-ENV:Subcode><SOAP-ENV:Value>ter:ActionNotSupported</SOAP-ENV:Value></SOAP-ENV:Subcode></SOAP-ENV:Code><SOAP-ENV:Reason><SOAP-ENV:Text xml:lang=\"en\">Optional Action Not 
Implemented</SOAP-ENV:Text></SOAP-ENV:Reason><SOAP-ENV:Detail></SOAP-ENV:Detail></SOAP-ENV:Fault></SOAP-ENV:Body></SOAP-ENV:Envelope>\r\n[2023-05-15 03:46:58] frigate.ptz ERROR : Unable to get presets from camera: front: Unknown error: Optional Action Not Implemented\n```\n\n\n### FFprobe output from your camera\n\n```shell\nno output\n```\n\n\n### Frigate stats\n\n_No response_\n\n### Operating system\n\nOther\n\n### Install method\n\nDocker CLI\n\n### Coral version\n\nUSB\n\n### Network connection\n\nWired\n\n### Camera make and model\n\nBCS-P-265R3WSM-G\n\n### Any other information that may be helpful\n\n_No response_\n", "before_files": [{"content": "\"\"\"Configure and control camera via onvif.\"\"\"\n\nimport logging\nimport site\n\nfrom enum import Enum\nfrom onvif import ONVIFCamera, ONVIFError\n\nfrom frigate.config import FrigateConfig\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OnvifCommandEnum(str, Enum):\n \"\"\"Holds all possible move commands\"\"\"\n\n init = \"init\"\n move_down = \"move_down\"\n move_left = \"move_left\"\n move_right = \"move_right\"\n move_up = \"move_up\"\n preset = \"preset\"\n stop = \"stop\"\n zoom_in = \"zoom_in\"\n zoom_out = \"zoom_out\"\n\n\nclass OnvifController:\n def __init__(self, config: FrigateConfig) -> None:\n self.cams: dict[str, ONVIFCamera] = {}\n\n for cam_name, cam in config.cameras.items():\n if not cam.enabled:\n continue\n\n if cam.onvif.host:\n try:\n self.cams[cam_name] = {\n \"onvif\": ONVIFCamera(\n cam.onvif.host,\n cam.onvif.port,\n cam.onvif.user,\n cam.onvif.password,\n wsdl_dir=site.getsitepackages()[0].replace(\n \"dist-packages\", \"site-packages\"\n )\n + \"/wsdl\",\n ),\n \"init\": False,\n \"active\": False,\n \"presets\": {},\n }\n except ONVIFError as e:\n logger.error(f\"Onvif connection to {cam.name} failed: {e}\")\n\n def _init_onvif(self, camera_name: str) -> bool:\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n\n # create init services\n media = onvif.create_media_service()\n\n try:\n profile = media.GetProfiles()[0]\n except ONVIFError as e:\n logger.error(f\"Unable to connect to camera: {camera_name}: {e}\")\n return False\n\n ptz = onvif.create_ptz_service()\n request = ptz.create_type(\"GetConfigurationOptions\")\n request.ConfigurationToken = profile.PTZConfiguration.token\n\n # setup moving request\n move_request = ptz.create_type(\"ContinuousMove\")\n move_request.ProfileToken = profile.token\n self.cams[camera_name][\"move_request\"] = move_request\n\n # setup existing presets\n try:\n presets: list[dict] = ptz.GetPresets({\"ProfileToken\": profile.token})\n except ONVIFError as e:\n logger.error(f\"Unable to get presets from camera: {camera_name}: {e}\")\n return False\n\n for preset in presets:\n self.cams[camera_name][\"presets\"][preset[\"Name\"].lower()] = preset[\"token\"]\n\n # get list of supported features\n ptz_config = ptz.GetConfigurationOptions(request)\n supported_features = []\n\n if ptz_config.Spaces and ptz_config.Spaces.ContinuousPanTiltVelocitySpace:\n supported_features.append(\"pt\")\n\n if ptz_config.Spaces and ptz_config.Spaces.ContinuousZoomVelocitySpace:\n supported_features.append(\"zoom\")\n\n self.cams[camera_name][\"features\"] = supported_features\n\n self.cams[camera_name][\"init\"] = True\n return True\n\n def _stop(self, camera_name: str) -> None:\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n move_request = self.cams[camera_name][\"move_request\"]\n onvif.get_service(\"ptz\").Stop(\n {\n \"ProfileToken\": move_request.ProfileToken,\n 
\"PanTilt\": True,\n \"Zoom\": True,\n }\n )\n self.cams[camera_name][\"active\"] = False\n\n def _move(self, camera_name: str, command: OnvifCommandEnum) -> None:\n if self.cams[camera_name][\"active\"]:\n logger.warning(\n f\"{camera_name} is already performing an action, stopping...\"\n )\n self._stop(camera_name)\n\n self.cams[camera_name][\"active\"] = True\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n move_request = self.cams[camera_name][\"move_request\"]\n\n if command == OnvifCommandEnum.move_left:\n move_request.Velocity = {\"PanTilt\": {\"x\": -0.5, \"y\": 0}}\n elif command == OnvifCommandEnum.move_right:\n move_request.Velocity = {\"PanTilt\": {\"x\": 0.5, \"y\": 0}}\n elif command == OnvifCommandEnum.move_up:\n move_request.Velocity = {\n \"PanTilt\": {\n \"x\": 0,\n \"y\": 0.5,\n }\n }\n elif command == OnvifCommandEnum.move_down:\n move_request.Velocity = {\n \"PanTilt\": {\n \"x\": 0,\n \"y\": -0.5,\n }\n }\n\n onvif.get_service(\"ptz\").ContinuousMove(move_request)\n\n def _move_to_preset(self, camera_name: str, preset: str) -> None:\n if not preset in self.cams[camera_name][\"presets\"]:\n logger.error(f\"{preset} is not a valid preset for {camera_name}\")\n return\n\n self.cams[camera_name][\"active\"] = True\n move_request = self.cams[camera_name][\"move_request\"]\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n preset_token = self.cams[camera_name][\"presets\"][preset]\n onvif.get_service(\"ptz\").GotoPreset(\n {\n \"ProfileToken\": move_request.ProfileToken,\n \"PresetToken\": preset_token,\n }\n )\n self.cams[camera_name][\"active\"] = False\n\n def _zoom(self, camera_name: str, command: OnvifCommandEnum) -> None:\n if self.cams[camera_name][\"active\"]:\n logger.warning(\n f\"{camera_name} is already performing an action, stopping...\"\n )\n self._stop(camera_name)\n\n self.cams[camera_name][\"active\"] = True\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n move_request = self.cams[camera_name][\"move_request\"]\n\n if command == OnvifCommandEnum.zoom_in:\n move_request.Velocity = {\"Zoom\": {\"x\": 0.5}}\n elif command == OnvifCommandEnum.zoom_out:\n move_request.Velocity = {\"Zoom\": {\"x\": -0.5}}\n\n onvif.get_service(\"ptz\").ContinuousMove(move_request)\n\n def handle_command(\n self, camera_name: str, command: OnvifCommandEnum, param: str = \"\"\n ) -> None:\n if camera_name not in self.cams.keys():\n logger.error(f\"Onvif is not setup for {camera_name}\")\n return\n\n if not self.cams[camera_name][\"init\"]:\n if not self._init_onvif(camera_name):\n return\n\n if command == OnvifCommandEnum.init:\n # already init\n return\n elif command == OnvifCommandEnum.stop:\n self._stop(camera_name)\n elif command == OnvifCommandEnum.preset:\n self._move_to_preset(camera_name, param)\n elif (\n command == OnvifCommandEnum.zoom_in or command == OnvifCommandEnum.zoom_out\n ):\n self._zoom(camera_name, command)\n else:\n self._move(camera_name, command)\n\n def get_camera_info(self, camera_name: str) -> dict[str, any]:\n if camera_name not in self.cams.keys():\n logger.error(f\"Onvif is not setup for {camera_name}\")\n return {}\n\n if not self.cams[camera_name][\"init\"]:\n self._init_onvif(camera_name)\n\n return {\n \"name\": camera_name,\n \"features\": self.cams[camera_name][\"features\"],\n \"presets\": list(self.cams[camera_name][\"presets\"].keys()),\n }\n", "path": "frigate/ptz.py"}], "after_files": [{"content": "\"\"\"Configure and control camera via onvif.\"\"\"\n\nimport logging\nimport site\n\nfrom enum import Enum\nfrom 
onvif import ONVIFCamera, ONVIFError\n\nfrom frigate.config import FrigateConfig\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OnvifCommandEnum(str, Enum):\n \"\"\"Holds all possible move commands\"\"\"\n\n init = \"init\"\n move_down = \"move_down\"\n move_left = \"move_left\"\n move_right = \"move_right\"\n move_up = \"move_up\"\n preset = \"preset\"\n stop = \"stop\"\n zoom_in = \"zoom_in\"\n zoom_out = \"zoom_out\"\n\n\nclass OnvifController:\n def __init__(self, config: FrigateConfig) -> None:\n self.cams: dict[str, ONVIFCamera] = {}\n\n for cam_name, cam in config.cameras.items():\n if not cam.enabled:\n continue\n\n if cam.onvif.host:\n try:\n self.cams[cam_name] = {\n \"onvif\": ONVIFCamera(\n cam.onvif.host,\n cam.onvif.port,\n cam.onvif.user,\n cam.onvif.password,\n wsdl_dir=site.getsitepackages()[0].replace(\n \"dist-packages\", \"site-packages\"\n )\n + \"/wsdl\",\n ),\n \"init\": False,\n \"active\": False,\n \"presets\": {},\n }\n except ONVIFError as e:\n logger.error(f\"Onvif connection to {cam.name} failed: {e}\")\n\n def _init_onvif(self, camera_name: str) -> bool:\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n\n # create init services\n media = onvif.create_media_service()\n\n try:\n profile = media.GetProfiles()[0]\n except ONVIFError as e:\n logger.error(f\"Unable to connect to camera: {camera_name}: {e}\")\n return False\n\n ptz = onvif.create_ptz_service()\n request = ptz.create_type(\"GetConfigurationOptions\")\n request.ConfigurationToken = profile.PTZConfiguration.token\n\n # setup moving request\n move_request = ptz.create_type(\"ContinuousMove\")\n move_request.ProfileToken = profile.token\n self.cams[camera_name][\"move_request\"] = move_request\n\n # setup existing presets\n try:\n presets: list[dict] = ptz.GetPresets({\"ProfileToken\": profile.token})\n except ONVIFError as e:\n logger.warning(f\"Unable to get presets from camera: {camera_name}: {e}\")\n presets = []\n\n for preset in presets:\n self.cams[camera_name][\"presets\"][preset[\"Name\"].lower()] = preset[\"token\"]\n\n # get list of supported features\n ptz_config = ptz.GetConfigurationOptions(request)\n supported_features = []\n\n if ptz_config.Spaces and ptz_config.Spaces.ContinuousPanTiltVelocitySpace:\n supported_features.append(\"pt\")\n\n if ptz_config.Spaces and ptz_config.Spaces.ContinuousZoomVelocitySpace:\n supported_features.append(\"zoom\")\n\n self.cams[camera_name][\"features\"] = supported_features\n\n self.cams[camera_name][\"init\"] = True\n return True\n\n def _stop(self, camera_name: str) -> None:\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n move_request = self.cams[camera_name][\"move_request\"]\n onvif.get_service(\"ptz\").Stop(\n {\n \"ProfileToken\": move_request.ProfileToken,\n \"PanTilt\": True,\n \"Zoom\": True,\n }\n )\n self.cams[camera_name][\"active\"] = False\n\n def _move(self, camera_name: str, command: OnvifCommandEnum) -> None:\n if self.cams[camera_name][\"active\"]:\n logger.warning(\n f\"{camera_name} is already performing an action, stopping...\"\n )\n self._stop(camera_name)\n\n self.cams[camera_name][\"active\"] = True\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n move_request = self.cams[camera_name][\"move_request\"]\n\n if command == OnvifCommandEnum.move_left:\n move_request.Velocity = {\"PanTilt\": {\"x\": -0.5, \"y\": 0}}\n elif command == OnvifCommandEnum.move_right:\n move_request.Velocity = {\"PanTilt\": {\"x\": 0.5, \"y\": 0}}\n elif command == OnvifCommandEnum.move_up:\n move_request.Velocity = {\n 
\"PanTilt\": {\n \"x\": 0,\n \"y\": 0.5,\n }\n }\n elif command == OnvifCommandEnum.move_down:\n move_request.Velocity = {\n \"PanTilt\": {\n \"x\": 0,\n \"y\": -0.5,\n }\n }\n\n onvif.get_service(\"ptz\").ContinuousMove(move_request)\n\n def _move_to_preset(self, camera_name: str, preset: str) -> None:\n if not preset in self.cams[camera_name][\"presets\"]:\n logger.error(f\"{preset} is not a valid preset for {camera_name}\")\n return\n\n self.cams[camera_name][\"active\"] = True\n move_request = self.cams[camera_name][\"move_request\"]\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n preset_token = self.cams[camera_name][\"presets\"][preset]\n onvif.get_service(\"ptz\").GotoPreset(\n {\n \"ProfileToken\": move_request.ProfileToken,\n \"PresetToken\": preset_token,\n }\n )\n self.cams[camera_name][\"active\"] = False\n\n def _zoom(self, camera_name: str, command: OnvifCommandEnum) -> None:\n if self.cams[camera_name][\"active\"]:\n logger.warning(\n f\"{camera_name} is already performing an action, stopping...\"\n )\n self._stop(camera_name)\n\n self.cams[camera_name][\"active\"] = True\n onvif: ONVIFCamera = self.cams[camera_name][\"onvif\"]\n move_request = self.cams[camera_name][\"move_request\"]\n\n if command == OnvifCommandEnum.zoom_in:\n move_request.Velocity = {\"Zoom\": {\"x\": 0.5}}\n elif command == OnvifCommandEnum.zoom_out:\n move_request.Velocity = {\"Zoom\": {\"x\": -0.5}}\n\n onvif.get_service(\"ptz\").ContinuousMove(move_request)\n\n def handle_command(\n self, camera_name: str, command: OnvifCommandEnum, param: str = \"\"\n ) -> None:\n if camera_name not in self.cams.keys():\n logger.error(f\"Onvif is not setup for {camera_name}\")\n return\n\n if not self.cams[camera_name][\"init\"]:\n if not self._init_onvif(camera_name):\n return\n\n if command == OnvifCommandEnum.init:\n # already init\n return\n elif command == OnvifCommandEnum.stop:\n self._stop(camera_name)\n elif command == OnvifCommandEnum.preset:\n self._move_to_preset(camera_name, param)\n elif (\n command == OnvifCommandEnum.zoom_in or command == OnvifCommandEnum.zoom_out\n ):\n self._zoom(camera_name, command)\n else:\n self._move(camera_name, command)\n\n def get_camera_info(self, camera_name: str) -> dict[str, any]:\n if camera_name not in self.cams.keys():\n logger.error(f\"Onvif is not setup for {camera_name}\")\n return {}\n\n if not self.cams[camera_name][\"init\"]:\n self._init_onvif(camera_name)\n\n return {\n \"name\": camera_name,\n \"features\": self.cams[camera_name][\"features\"],\n \"presets\": list(self.cams[camera_name][\"presets\"].keys()),\n }\n", "path": "frigate/ptz.py"}]} | 3,844 | 159 |
gh_patches_debug_18264 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2912 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E0002: Unknown exception while processing rule E2530: 'dict' object has no attribute 'startswith'
### CloudFormation Lint Version
0.82.0
### What operating system are you using?
windows 11
### Describe the bug
I receive the E2530 error even though I don't have SnapStart in my Lambda resource
### Expected behavior
no error
### Reproduction template
If I remove this Lambda resource, it doesn't throw the error
```
Lambda:
Type: AWS::Lambda::Function
DependsOn:
- LambdaLogGroup
Properties:
Architectures:
- !If
- ConditionLambdaArmSupport
- !Ref OSArchitechture
- x86_64
Code:
S3Bucket:
Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${AppName}-artifact-s3-bucket'
S3Key: !Ref LambdaS3Key
S3ObjectVersion: !Ref LambdaS3ObjectVersion
Description: !Sub 'Lambda Function - ${LambdaPurpose}'
FunctionName: !Sub '${Region}-${BusinessUnit}-${Environment}-${AppName}-lambda-${LambdaPurpose}'
Handler: !Ref LambdaHandler
Layers:
- !Ref LambdaLayer
MemorySize: !Ref LambdaMemory
Role: !GetAtt LambdaRole.Arn
Runtime: !Ref LambdaRuntime
Timeout: !Ref LambdaTimeout
VpcConfig: !If
- ConditionVpcAttachment
- SecurityGroupIds:
- Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${AppName}-sg-${LambdaPurpose}'
SubnetIds:
- Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${VpcAttachment}-subnet-1-id'
- Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${VpcAttachment}-subnet-2-id'
- !If
- ConditionThirdAZ
- Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${VpcAttachment}-subnet-3-id'
- !Ref AWS::NoValue
- !Ref AWS::NoValue
Tags:
- Key: Name
Value: !Sub '${Region}-${BusinessUnit}-${Environment}-${AppName}-lambda-${LambdaPurpose}'
- Key: region-abbreviation
Value: !Ref Region
- Key: business-unit
Value: !Ref BusinessUnit
- Key: environment-type
Value: !Ref Environment
- Key: app-name
Value: !Ref AppName
- Key: service-type
Value: !Ref Service
- Key: cost-center
Value: !Ref CostCenter
- Key: compliance-type
Value: !Ref Compliance
- Key: owner-name
Value: !Ref OwnerName
- Key: owner-email-id
Value: !Ref OwnerEmail
- Key: patch-group
Value: !Ref PatchGroup
- Key: criticality-level
Value: !Ref CriticalityLevel
- Key: map-migrated
Value: !Ref MapMigrated
```
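
For context, the crash most likely comes from `Runtime: !Ref LambdaRuntime`: at lint time the value is still the unresolved `{"Ref": "LambdaRuntime"}` dict rather than a string, so calling `runtime.startswith("java")` raises. A minimal sketch of the kind of type guard that avoids it (assuming `runtime` can be any JSON value at this point in the rule; the helper name is hypothetical):

```python
# Hypothetical sketch of the guard needed before calling str methods on Runtime.
def runtime_supports_snapstart(runtime) -> bool:
    if not isinstance(runtime, str):
        # Unresolved intrinsics such as {"Ref": "LambdaRuntime"} arrive here as dicts.
        return False
    # Java 11 and newer runtimes support SnapStart; java8 / java8.al2 do not.
    return runtime.startswith("java") and runtime not in ("java8", "java8.al2")
```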
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 from cfnlint.rules import CloudFormationLintRule, RuleMatch
7
8
9 class SnapStartEnabled(CloudFormationLintRule):
10 """Check if the SnapStart is enabled for certain java runtimes"""
11
12 id = "I2530"
13 shortdesc = "Validate that SnapStart is configured for >= Java11 runtimes"
14 description = (
15 "SnapStart is a no-cost feature that can increase performance up to 10x. "
16 "Enable SnapStart for Java 11 and greater runtimes"
17 )
18 source_url = "https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html"
19 tags = ["resources", "lambda"]
20
21 def __init__(self):
22 super().__init__()
23 self.resource_property_types.append("AWS::Lambda::Function")
24
25 def validate(self, runtime, path):
26 if not runtime:
27 return []
28
29 if not (runtime.startswith("java")) and runtime not in ["java8.al2", "java8"]:
30 return []
31
32 return [
33 RuleMatch(
34 path,
35 f"When using {runtime} configure SnapStart",
36 rule=self,
37 )
38 ]
39
```
Path: `src/cfnlint/rules/resources/lmbd/SnapStartSupported.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 from cfnlint.rules import CloudFormationLintRule, RuleMatch
7
8
9 class SnapStartSupported(CloudFormationLintRule):
10 """Check if Lambda function using SnapStart has the correct runtimes"""
11
12 id = "E2530"
13 shortdesc = "SnapStart supports the configured runtime"
14 description = (
15 "To properly leverage SnapStart, you must have a runtime of Java11 or greater"
16 )
17 source_url = "https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html"
18 tags = ["resources", "lambda"]
19
20 def __init__(self):
21 super().__init__()
22 self.resource_property_types.append("AWS::Lambda::Function")
23 self.child_rules = {"I2530": None}
24
25 def match_resource_properties(self, properties, _, path, cfn):
26 """Check CloudFormation Properties"""
27 matches = []
28
29 for scenario in cfn.get_object_without_nested_conditions(properties, path):
30 props = scenario.get("Object")
31
32 runtime = props.get("Runtime")
33 snap_start = props.get("SnapStart")
34 if not snap_start:
35 if self.child_rules["I2530"]:
36 matches.extend(self.child_rules["I2530"].validate(runtime, path))
37 continue
38
39 if snap_start.get("ApplyOn") != "PublishedVersions":
40 continue
41
42 if (
43 runtime
44 and (not runtime.startswith("java"))
45 and runtime not in ["java8.al2", "java8"]
46 ):
47 matches.append(
48 RuleMatch(
49 path + ["SnapStart", "ApplyOn"],
50 f"{runtime} is not supported for SnapStart enabled functions",
51 )
52 )
53
54 return matches
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py b/src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py
--- a/src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py
+++ b/src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py
@@ -23,7 +23,7 @@
self.resource_property_types.append("AWS::Lambda::Function")
def validate(self, runtime, path):
- if not runtime:
+ if not isinstance(runtime, str):
return []
if not (runtime.startswith("java")) and runtime not in ["java8.al2", "java8"]:
diff --git a/src/cfnlint/rules/resources/lmbd/SnapStartSupported.py b/src/cfnlint/rules/resources/lmbd/SnapStartSupported.py
--- a/src/cfnlint/rules/resources/lmbd/SnapStartSupported.py
+++ b/src/cfnlint/rules/resources/lmbd/SnapStartSupported.py
@@ -39,6 +39,10 @@
if snap_start.get("ApplyOn") != "PublishedVersions":
continue
+ # Validate runtime is a string before using startswith
+ if not isinstance(runtime, str):
+ continue
+
if (
runtime
and (not runtime.startswith("java"))
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py b/src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py\n--- a/src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py\n+++ b/src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py\n@@ -23,7 +23,7 @@\n self.resource_property_types.append(\"AWS::Lambda::Function\")\n \n def validate(self, runtime, path):\n- if not runtime:\n+ if not isinstance(runtime, str):\n return []\n \n if not (runtime.startswith(\"java\")) and runtime not in [\"java8.al2\", \"java8\"]:\ndiff --git a/src/cfnlint/rules/resources/lmbd/SnapStartSupported.py b/src/cfnlint/rules/resources/lmbd/SnapStartSupported.py\n--- a/src/cfnlint/rules/resources/lmbd/SnapStartSupported.py\n+++ b/src/cfnlint/rules/resources/lmbd/SnapStartSupported.py\n@@ -39,6 +39,10 @@\n if snap_start.get(\"ApplyOn\") != \"PublishedVersions\":\n continue\n \n+ # Validate runtime is a string before using startswith\n+ if not isinstance(runtime, str):\n+ continue\n+\n if (\n runtime\n and (not runtime.startswith(\"java\"))\n", "issue": "E0002: Unknown exception while processing rule E2530: 'dict' object has no attribute 'startswith'\n### CloudFormation Lint Version\n\n0.82.0\n\n### What operating system are you using?\n\nwindows 11\n\n### Describe the bug\n\nreceive the E2530 error even though i dont have SnapStart in my lambda resource\r\n\r\n\r\n\n\n### Expected behavior\n\nno error\n\n### Reproduction template\n\nif i remove this lambda resource, it doesnt throw the error\r\n```\r\n Lambda:\r\n Type: AWS::Lambda::Function\r\n DependsOn:\r\n - LambdaLogGroup\r\n Properties:\r\n Architectures:\r\n - !If\r\n - ConditionLambdaArmSupport\r\n - !Ref OSArchitechture\r\n - x86_64\r\n Code:\r\n S3Bucket:\r\n Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${AppName}-artifact-s3-bucket'\r\n S3Key: !Ref LambdaS3Key\r\n S3ObjectVersion: !Ref LambdaS3ObjectVersion\r\n Description: !Sub 'Lambda Function - ${LambdaPurpose}'\r\n FunctionName: !Sub '${Region}-${BusinessUnit}-${Environment}-${AppName}-lambda-${LambdaPurpose}'\r\n Handler: !Ref LambdaHandler\r\n Layers:\r\n - !Ref LambdaLayer\r\n MemorySize: !Ref LambdaMemory\r\n Role: !GetAtt LambdaRole.Arn\r\n Runtime: !Ref LambdaRuntime\r\n Timeout: !Ref LambdaTimeout\r\n VpcConfig: !If\r\n - ConditionVpcAttachment\r\n - SecurityGroupIds:\r\n - Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${AppName}-sg-${LambdaPurpose}'\r\n SubnetIds:\r\n - Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${VpcAttachment}-subnet-1-id'\r\n - Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${VpcAttachment}-subnet-2-id'\r\n - !If\r\n - ConditionThirdAZ\r\n - Fn::ImportValue: !Sub '${Region}-${BusinessUnit}-${Environment}-${VpcAttachment}-subnet-3-id'\r\n - !Ref AWS::NoValue\r\n - !Ref AWS::NoValue\r\n Tags:\r\n - Key: Name\r\n Value: !Sub '${Region}-${BusinessUnit}-${Environment}-${AppName}-lambda-${LambdaPurpose}'\r\n - Key: region-abbreviation\r\n Value: !Ref Region\r\n - Key: business-unit\r\n Value: !Ref BusinessUnit\r\n - Key: environment-type\r\n Value: !Ref Environment\r\n - Key: app-name\r\n Value: !Ref AppName\r\n - Key: service-type\r\n Value: !Ref Service\r\n - Key: cost-center\r\n Value: !Ref CostCenter\r\n - Key: compliance-type\r\n Value: !Ref Compliance\r\n - Key: owner-name\r\n Value: !Ref OwnerName\r\n - Key: owner-email-id\r\n Value: !Ref OwnerEmail\r\n - Key: patch-group\r\n Value: !Ref PatchGroup\r\n - Key: criticality-level\r\n Value: !Ref CriticalityLevel\r\n - Key: 
map-migrated\r\n Value: !Ref MapMigrated\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SnapStartEnabled(CloudFormationLintRule):\n \"\"\"Check if the SnapStart is enabled for certain java runtimes\"\"\"\n\n id = \"I2530\"\n shortdesc = \"Validate that SnapStart is configured for >= Java11 runtimes\"\n description = (\n \"SnapStart is a no-cost feature that can increase performance up to 10x. \"\n \"Enable SnapStart for Java 11 and greater runtimes\"\n )\n source_url = \"https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html\"\n tags = [\"resources\", \"lambda\"]\n\n def __init__(self):\n super().__init__()\n self.resource_property_types.append(\"AWS::Lambda::Function\")\n\n def validate(self, runtime, path):\n if not runtime:\n return []\n\n if not (runtime.startswith(\"java\")) and runtime not in [\"java8.al2\", \"java8\"]:\n return []\n\n return [\n RuleMatch(\n path,\n f\"When using {runtime} configure SnapStart\",\n rule=self,\n )\n ]\n", "path": "src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py"}, {"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SnapStartSupported(CloudFormationLintRule):\n \"\"\"Check if Lambda function using SnapStart has the correct runtimes\"\"\"\n\n id = \"E2530\"\n shortdesc = \"SnapStart supports the configured runtime\"\n description = (\n \"To properly leverage SnapStart, you must have a runtime of Java11 or greater\"\n )\n source_url = \"https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html\"\n tags = [\"resources\", \"lambda\"]\n\n def __init__(self):\n super().__init__()\n self.resource_property_types.append(\"AWS::Lambda::Function\")\n self.child_rules = {\"I2530\": None}\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n for scenario in cfn.get_object_without_nested_conditions(properties, path):\n props = scenario.get(\"Object\")\n\n runtime = props.get(\"Runtime\")\n snap_start = props.get(\"SnapStart\")\n if not snap_start:\n if self.child_rules[\"I2530\"]:\n matches.extend(self.child_rules[\"I2530\"].validate(runtime, path))\n continue\n\n if snap_start.get(\"ApplyOn\") != \"PublishedVersions\":\n continue\n\n if (\n runtime\n and (not runtime.startswith(\"java\"))\n and runtime not in [\"java8.al2\", \"java8\"]\n ):\n matches.append(\n RuleMatch(\n path + [\"SnapStart\", \"ApplyOn\"],\n f\"{runtime} is not supported for SnapStart enabled functions\",\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/lmbd/SnapStartSupported.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SnapStartEnabled(CloudFormationLintRule):\n \"\"\"Check if the SnapStart is enabled for certain java runtimes\"\"\"\n\n id = \"I2530\"\n shortdesc = \"Validate that SnapStart is configured for >= Java11 runtimes\"\n description = (\n \"SnapStart is a no-cost feature that can increase performance up to 10x. 
\"\n \"Enable SnapStart for Java 11 and greater runtimes\"\n )\n source_url = \"https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html\"\n tags = [\"resources\", \"lambda\"]\n\n def __init__(self):\n super().__init__()\n self.resource_property_types.append(\"AWS::Lambda::Function\")\n\n def validate(self, runtime, path):\n if not isinstance(runtime, str):\n return []\n\n if not (runtime.startswith(\"java\")) and runtime not in [\"java8.al2\", \"java8\"]:\n return []\n\n return [\n RuleMatch(\n path,\n f\"When using {runtime} configure SnapStart\",\n rule=self,\n )\n ]\n", "path": "src/cfnlint/rules/resources/lmbd/SnapStartEnabled.py"}, {"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SnapStartSupported(CloudFormationLintRule):\n \"\"\"Check if Lambda function using SnapStart has the correct runtimes\"\"\"\n\n id = \"E2530\"\n shortdesc = \"SnapStart supports the configured runtime\"\n description = (\n \"To properly leverage SnapStart, you must have a runtime of Java11 or greater\"\n )\n source_url = \"https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html\"\n tags = [\"resources\", \"lambda\"]\n\n def __init__(self):\n super().__init__()\n self.resource_property_types.append(\"AWS::Lambda::Function\")\n self.child_rules = {\"I2530\": None}\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n for scenario in cfn.get_object_without_nested_conditions(properties, path):\n props = scenario.get(\"Object\")\n\n runtime = props.get(\"Runtime\")\n snap_start = props.get(\"SnapStart\")\n if not snap_start:\n if self.child_rules[\"I2530\"]:\n matches.extend(self.child_rules[\"I2530\"].validate(runtime, path))\n continue\n\n if snap_start.get(\"ApplyOn\") != \"PublishedVersions\":\n continue\n\n # Validate runtime is a string before using startswith\n if not isinstance(runtime, str):\n continue\n\n if (\n runtime\n and (not runtime.startswith(\"java\"))\n and runtime not in [\"java8.al2\", \"java8\"]\n ):\n matches.append(\n RuleMatch(\n path + [\"SnapStart\", \"ApplyOn\"],\n f\"{runtime} is not supported for SnapStart enabled functions\",\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/lmbd/SnapStartSupported.py"}]} | 1,806 | 285 |
gh_patches_debug_10783 | rasdani/github-patches | git_diff | ros__ros_comm-1568 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
roslaunch gives deceptive error if chmod +x isn't set
Trying to `roslaunch pack_name launcher.launch` fails with the following error if your source file exists in the appropriate spot, but does not have `+x` permission set.
```
ERROR: cannot launch node of type [pack_name/node_name.py]:
can't locate node [node_name.py] in package [pack_name]
```
Looks like it is from:
https://github.com/ros/ros_comm/blob/eb7e686a9a58109087c956eed3cfa6f2ed3eec3d/tools/roslaunch/src/roslaunch/node_args.py#L256
I spent a lot of time mucking around with path, ENV, packages.xml, namespaces, and the launch file, thinking it is a problem with a path or naming scheme. Once you know what you are looking for, it is obvious from questions on the wiki that this is a [commonly](https://answers.ros.org/question/244463/errorcannot-launch-node-of-typetesttestcannot-locate-node-test-in-package-test/) [encountered](https://answers.ros.org/question/209426/i-cant-launch-a-node/) [problem](https://answers.ros.org/question/241690/roslaunch-error-error-cannot-launch-node-of-type/).
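
A quick way to rule the permission problem in or out before digging into paths (a rough sketch, with a hypothetical script location):

```python
import os
import stat

# Hypothetical path to the node script inside the package's source space.
path = "/home/user/catkin_ws/src/pack_name/scripts/node_name.py"

if not os.access(path, os.X_OK):
    # Equivalent of `chmod +x`: add the execute bits for user, group and others.
    os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
```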
If the file is `+x` but lacks a shebang, the more helpful error occurs:
```
Unable to launch [foo/image_nexus_node-2].
If it is a script, you may be missing a '#!' declaration at the top.
```
Could someone add a more useful helper text here to indicate that this might be a permission error and not a path error? Perhaps:
```
raise NodeParamsException(
    "can't locate node [%s] in package [%s]. "
    "Make sure file exists in package path and permission is set to executable (chmod +x)"
    % (node.type, node.package))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/roslaunch/src/roslaunch/node_args.py`
Content:
```
1 # Software License Agreement (BSD License)
2 #
3 # Copyright (c) 2009, Willow Garage, Inc.
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions
8 # are met:
9 #
10 # * Redistributions of source code must retain the above copyright
11 # notice, this list of conditions and the following disclaimer.
12 # * Redistributions in binary form must reproduce the above
13 # copyright notice, this list of conditions and the following
14 # disclaimer in the documentation and/or other materials provided
15 # with the distribution.
16 # * Neither the name of Willow Garage, Inc. nor the names of its
17 # contributors may be used to endorse or promote products derived
18 # from this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 # POSSIBILITY OF SUCH DAMAGE.
32 #
33 # Revision $Id$
34
35 from __future__ import print_function
36
37 """
38 Utility module of roslaunch that computes the command-line arguments
39 for a node.
40 """
41
42 import logging
43 import os
44 import shlex
45 import sys
46 import time
47
48 import rospkg
49 import rosgraph
50 import rosgraph.names
51 from rosgraph.names import script_resolve_name
52
53 import roslib.packages
54
55 from . import substitution_args
56
57 from roslaunch.core import setup_env, local_machine, RLException
58 from roslaunch.config import load_config_default
59 import roslaunch.xmlloader
60
61 class NodeParamsException(Exception):
62 """
63 Exception to indicate that node parameters were invalid
64 """
65 pass
66
67 def get_node_list(config):
68 """
69 @param config: roslaunch config
70 @type config: ROSLaunchConfig
71 @return: list of node names in config
72 @rtype: [str]
73 """
74 l = [_resolved_name(node) for node in config.nodes] + [_resolved_name(test) for test in config.tests]
75 # filter out unnamed nodes
76 return [x for x in l if x]
77
78 def print_node_list(roslaunch_files):
79 """
80 Print list of nodes to screen. Will cause system exit if exception
81 occurs. This is a subroutine for the roslaunch main handler.
82
83 @param roslaunch_files: list of launch files to load
84 @type roslaunch_files: str
85 """
86 try:
87 loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)
88 config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)
89 node_list = get_node_list(config)
90 print('\n'.join(node_list))
91 except RLException as e:
92 print(str(e), file=sys.stderr)
93 sys.exit(1)
94
95 def print_node_args(node_name, roslaunch_files):
96 """
97 Print arguments of node to screen. Will cause system exit if
98 exception occurs. This is a subroutine for the roslaunch main
99 handler.
100
101 @param node_name: node name
102 @type node_name: str
103 @param roslaunch_files: list of launch files to load
104 @type roslaunch_files: str
105 """
106 try:
107 node_name = script_resolve_name('roslaunch', node_name)
108 args = get_node_args(node_name, roslaunch_files)
109 print(' '.join(args))
110 except RLException as e:
111 print(str(e), file=sys.stderr)
112 sys.exit(1)
113
114 def _resolved_name(node):
115 if node.name:
116 # $(anon id) passthrough
117 if node.name.startswith('$'):
118 return node.name
119 else:
120 return rosgraph.names.ns_join(node.namespace, node.name)
121 else:
122 return None
123
124 def print_node_filename(node_name, roslaunch_files):
125 try:
126 # #2309
127 node_name = script_resolve_name('roslaunch', node_name)
128
129 loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)
130 config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)
131 nodes = [n for n in config.nodes if _resolved_name(n) == node_name] + \
132 [t for t in config.tests if _resolved_name(t) == node_name]
133
134 if len(nodes) > 1:
135 raise RLException("ERROR: multiple nodes named [%s] in [%s].\nPlease fix the launch files as duplicate names are not allowed."%(node_name, ', '.join(roslaunch_files)))
136 if not nodes:
137 print('ERROR: cannot find node named [%s]. Run \n\troslaunch --nodes <files>\nto see list of node names.' % (node_name), file=sys.stderr)
138 else:
139 print(nodes[0].filename)
140
141 except RLException as e:
142 print(str(e), file=sys.stderr)
143 sys.exit(1)
144
145 def get_node_args(node_name, roslaunch_files):
146 """
147 Get the node arguments for a node in roslaunch_files.
148
149 @param node_name: name of node in roslaunch_files.
150 @type node_name: str
151 @param roslaunch_files: roslaunch file names
152 @type roslaunch_files: [str]
153 @return: list of command-line arguments used to launch node_name
154 @rtype: [str]
155 @raise RLException: if node args cannot be retrieved
156 """
157
158 # we have to create our own XmlLoader so that we can use the same
159 # resolution context for substitution args
160
161 loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)
162 config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)
163 (node_name) = substitution_args.resolve_args((node_name), resolve_anon=False)
164 node_name = script_resolve_name('roslaunch', node_name) if not node_name.startswith('$') else node_name
165
166 node = [n for n in config.nodes if _resolved_name(n) == node_name] + \
167 [n for n in config.tests if _resolved_name(n) == node_name]
168 if not node:
169 node_list = get_node_list(config)
170 node_list_str = '\n'.join([" * %s"%x for x in node_list])
171 raise RLException("ERROR: Cannot find node named [%s] in [%s].\nNode names are:\n%s"%(node_name, ', '.join(roslaunch_files), node_list_str))
172 elif len(node) > 1:
173 raise RLException("ERROR: multiple nodes named [%s] in [%s].\nPlease fix the launch files as duplicate names are not allowed."%(node_name, ', '.join(roslaunch_files)))
174 node = node[0]
175
176 master_uri = rosgraph.get_master_uri()
177 machine = local_machine()
178 env = setup_env(node, machine, master_uri)
179
180 # remove setting identical to current environment for easier debugging
181 to_remove = []
182 for k in env.keys():
183 if env[k] == os.environ.get(k, None):
184 to_remove.append(k)
185 for k in to_remove:
186 del env[k]
187
188 # resolve node name for generating args
189 args = create_local_process_args(node, machine)
190 # join environment vars are bash prefix args
191 return ["%s=%s"%(k, v) for k, v in env.items()] + args
192
193 def _launch_prefix_args(node):
194 if node.launch_prefix:
195 prefix = node.launch_prefix
196 try:
197 if type(prefix) == unicode:
198 prefix = prefix.encode('UTF-8')
199 except NameError:
200 pass
201 return shlex.split(prefix)
202 else:
203 return []
204
205 _rospack = None
206
207
208 def create_local_process_args(node, machine, env=None):
209 """
210 Subroutine for creating node arguments.
211
212 :param env: override os.environ. Warning, this does not override
213 substitution args in node configuration (for now), ``dict``
214 :returns: arguments for node process, ``[str]``
215 :raises: :exc:`NodeParamsException` If args cannot be constructed for Node
216 as specified (e.g. the node type does not exist)
217 """
218 global _rospack
219 if not node.name:
220 raise ValueError("node name must be defined")
221 # create rospack instance if no cached value is available or for custom environments
222 if not _rospack or env is not None:
223 rospack = rospkg.RosPack(rospkg.get_ros_paths(env=env))
224 # cache rospack instance for default environment
225 if env is None:
226 _rospack = rospack
227 else:
228 rospack = _rospack
229
230 # - Construct rosrun command
231 remap_args = ["%s:=%s"%(src,dst) for src, dst in node.remap_args]
232 resolve_dict = {}
233
234 #resolve args evaluates substitution commands
235 #shlex parses a command string into a list of args
236 # - for the local process args, we *do* resolve the anon tag so that the user can execute
237 # - the node name and args must be resolved together in case the args refer to the anon node name
238 (node_name) = substitution_args.resolve_args((node.name), context=resolve_dict, resolve_anon=True)
239 node.name = node_name
240 remap_args.append('__name:=%s'%node_name)
241
242 resolved = substitution_args.resolve_args(node.args, context=resolve_dict, resolve_anon=True)
243 try:
244 if type(resolved) == unicode:
245 resolved = resolved.encode('UTF-8') #attempt to force to string for shlex/subprocess
246 except NameError:
247 pass
248 args = shlex.split(resolved) + remap_args
249 try:
250 #TODO:fuerte: pass through rospack and catkin cache
251 matches = roslib.packages.find_node(node.package, node.type, rospack=rospack)
252 except rospkg.ResourceNotFound as e:
253 # multiple nodes, invalid package
254 raise NodeParamsException(str(e))
255 if not matches:
256 raise NodeParamsException("can't locate node [%s] in package [%s]"%(node.type, node.package))
257 else:
258 # old behavior was to take first, do we want to change this in Fuerte-style?
259 cmd = matches[0]
260 if not cmd:
261 raise NodeParamsException("Cannot locate node of type [%s] in package [%s]"%(node.type, node.package))
262 cmd = [cmd]
263 if sys.platform in ['win32']:
264 if os.path.splitext(cmd[0])[1] == '.py':
265 cmd = ['python'] + cmd
266 return _launch_prefix_args(node) + cmd + args
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/roslaunch/src/roslaunch/node_args.py b/tools/roslaunch/src/roslaunch/node_args.py
--- a/tools/roslaunch/src/roslaunch/node_args.py
+++ b/tools/roslaunch/src/roslaunch/node_args.py
@@ -253,7 +253,7 @@
# multiple nodes, invalid package
raise NodeParamsException(str(e))
if not matches:
- raise NodeParamsException("can't locate node [%s] in package [%s]"%(node.type, node.package))
+ raise NodeParamsException("Cannot locate node of type [%s] in package [%s]. Make sure file exists in package path and permission is set to executable (chmod +x)"%(node.type, node.package))
else:
# old behavior was to take first, do we want to change this in Fuerte-style?
cmd = matches[0]
| {"golden_diff": "diff --git a/tools/roslaunch/src/roslaunch/node_args.py b/tools/roslaunch/src/roslaunch/node_args.py\n--- a/tools/roslaunch/src/roslaunch/node_args.py\n+++ b/tools/roslaunch/src/roslaunch/node_args.py\n@@ -253,7 +253,7 @@\n # multiple nodes, invalid package\n raise NodeParamsException(str(e))\n if not matches:\n- raise NodeParamsException(\"can't locate node [%s] in package [%s]\"%(node.type, node.package))\n+ raise NodeParamsException(\"Cannot locate node of type [%s] in package [%s]. Make sure file exists in package path and permission is set to executable (chmod +x)\"%(node.type, node.package))\n else:\n # old behavior was to take first, do we want to change this in Fuerte-style?\n cmd = matches[0]\n", "issue": "roslaunch gives deceptive error if chmod +x isn't set\nTrying to `roslaunch pack_name launcher.launch` fails with the following error if your source file exists in the appropriate spot, but does not have `+x` permission set. \r\n\r\n```\r\nERROR: cannot launch node of type [pack_name/node_name.py]: \r\ncan't locate node [node_name.py] in package [pack_name]\r\n```\r\n\r\nLooks like it is from: \r\n\r\nhttps://github.com/ros/ros_comm/blob/eb7e686a9a58109087c956eed3cfa6f2ed3eec3d/tools/roslaunch/src/roslaunch/node_args.py#L256\r\n\r\nI spent a lot of time mucking around with path, ENV, packages.xml, namespaces, and the launch file, thinking it is a problem with a path or naming scheme. Once you know what you are looking for, it is obvious from questions on the wiki that this is a [commonly](https://answers.ros.org/question/244463/errorcannot-launch-node-of-typetesttestcannot-locate-node-test-in-package-test/) [encountered](https://answers.ros.org/question/209426/i-cant-launch-a-node/) [problem](https://answers.ros.org/question/241690/roslaunch-error-error-cannot-launch-node-of-type/). \r\n\r\nIf the file is `+x` but lacks a shebang, the more helpful error occurs:\r\n\r\n```\r\nUnable to launch [foo/image_nexus_node-2]. \r\nIf it is a script, you may be missing a '#!' declaration at the top.\r\n```\r\nCould someone add a more useful helper text here to indicate that this might be a permission error and not a path error? Perhaps: \r\n\r\n```\r\nraise NodeParamsException(\"can't locate node [%s] in package [%s]. \r\nMake sure file exists in package path and permission is set to executable (chmod +x)\"\r\n%(node.type, node.package))\r\n```\n", "before_files": [{"content": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2009, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Revision $Id$\n\nfrom __future__ import print_function\n\n\"\"\"\nUtility module of roslaunch that computes the command-line arguments\nfor a node.\n\"\"\"\n\nimport logging\nimport os\nimport shlex\nimport sys\nimport time\n\nimport rospkg\nimport rosgraph\nimport rosgraph.names\nfrom rosgraph.names import script_resolve_name\n\nimport roslib.packages\n\nfrom . import substitution_args\n\nfrom roslaunch.core import setup_env, local_machine, RLException\nfrom roslaunch.config import load_config_default\nimport roslaunch.xmlloader\n\nclass NodeParamsException(Exception):\n \"\"\"\n Exception to indicate that node parameters were invalid\n \"\"\"\n pass\n\ndef get_node_list(config):\n \"\"\"\n @param config: roslaunch config\n @type config: ROSLaunchConfig\n @return: list of node names in config\n @rtype: [str]\n \"\"\"\n l = [_resolved_name(node) for node in config.nodes] + [_resolved_name(test) for test in config.tests]\n # filter out unnamed nodes\n return [x for x in l if x]\n\ndef print_node_list(roslaunch_files):\n \"\"\"\n Print list of nodes to screen. Will cause system exit if exception\n occurs. This is a subroutine for the roslaunch main handler.\n\n @param roslaunch_files: list of launch files to load\n @type roslaunch_files: str\n \"\"\"\n try:\n loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)\n config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)\n node_list = get_node_list(config)\n print('\\n'.join(node_list))\n except RLException as e:\n print(str(e), file=sys.stderr)\n sys.exit(1)\n\ndef print_node_args(node_name, roslaunch_files):\n \"\"\"\n Print arguments of node to screen. Will cause system exit if\n exception occurs. 
This is a subroutine for the roslaunch main\n handler.\n \n @param node_name: node name\n @type node_name: str\n @param roslaunch_files: list of launch files to load\n @type roslaunch_files: str\n \"\"\"\n try:\n node_name = script_resolve_name('roslaunch', node_name)\n args = get_node_args(node_name, roslaunch_files)\n print(' '.join(args))\n except RLException as e:\n print(str(e), file=sys.stderr)\n sys.exit(1)\n \ndef _resolved_name(node):\n if node.name:\n # $(anon id) passthrough\n if node.name.startswith('$'):\n return node.name\n else:\n return rosgraph.names.ns_join(node.namespace, node.name)\n else:\n return None\n\ndef print_node_filename(node_name, roslaunch_files):\n try:\n # #2309\n node_name = script_resolve_name('roslaunch', node_name)\n \n loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)\n config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)\n nodes = [n for n in config.nodes if _resolved_name(n) == node_name] + \\\n [t for t in config.tests if _resolved_name(t) == node_name]\n\n if len(nodes) > 1:\n raise RLException(\"ERROR: multiple nodes named [%s] in [%s].\\nPlease fix the launch files as duplicate names are not allowed.\"%(node_name, ', '.join(roslaunch_files)))\n if not nodes:\n print('ERROR: cannot find node named [%s]. Run \\n\\troslaunch --nodes <files>\\nto see list of node names.' % (node_name), file=sys.stderr)\n else:\n print(nodes[0].filename)\n \n except RLException as e:\n print(str(e), file=sys.stderr)\n sys.exit(1)\n\ndef get_node_args(node_name, roslaunch_files):\n \"\"\"\n Get the node arguments for a node in roslaunch_files. \n\n @param node_name: name of node in roslaunch_files.\n @type node_name: str\n @param roslaunch_files: roslaunch file names\n @type roslaunch_files: [str]\n @return: list of command-line arguments used to launch node_name\n @rtype: [str]\n @raise RLException: if node args cannot be retrieved\n \"\"\"\n \n # we have to create our own XmlLoader so that we can use the same\n # resolution context for substitution args\n\n loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)\n config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)\n (node_name) = substitution_args.resolve_args((node_name), resolve_anon=False)\n node_name = script_resolve_name('roslaunch', node_name) if not node_name.startswith('$') else node_name\n \n node = [n for n in config.nodes if _resolved_name(n) == node_name] + \\\n [n for n in config.tests if _resolved_name(n) == node_name]\n if not node:\n node_list = get_node_list(config)\n node_list_str = '\\n'.join([\" * %s\"%x for x in node_list])\n raise RLException(\"ERROR: Cannot find node named [%s] in [%s].\\nNode names are:\\n%s\"%(node_name, ', '.join(roslaunch_files), node_list_str))\n elif len(node) > 1:\n raise RLException(\"ERROR: multiple nodes named [%s] in [%s].\\nPlease fix the launch files as duplicate names are not allowed.\"%(node_name, ', '.join(roslaunch_files)))\n node = node[0]\n \n master_uri = rosgraph.get_master_uri()\n machine = local_machine()\n env = setup_env(node, machine, master_uri)\n\n # remove setting identical to current environment for easier debugging\n to_remove = []\n for k in env.keys():\n if env[k] == os.environ.get(k, None):\n to_remove.append(k)\n for k in to_remove:\n del env[k]\n\n # resolve node name for generating args\n args = create_local_process_args(node, machine)\n # join environment vars are bash prefix args\n return [\"%s=%s\"%(k, v) for k, v in 
env.items()] + args\n \ndef _launch_prefix_args(node):\n if node.launch_prefix:\n prefix = node.launch_prefix\n try:\n if type(prefix) == unicode:\n prefix = prefix.encode('UTF-8')\n except NameError:\n pass\n return shlex.split(prefix)\n else:\n return []\n\n_rospack = None\n\n\ndef create_local_process_args(node, machine, env=None):\n \"\"\"\n Subroutine for creating node arguments.\n\n :param env: override os.environ. Warning, this does not override\n substitution args in node configuration (for now), ``dict``\n :returns: arguments for node process, ``[str]``\n :raises: :exc:`NodeParamsException` If args cannot be constructed for Node\n as specified (e.g. the node type does not exist)\n \"\"\"\n global _rospack\n if not node.name:\n raise ValueError(\"node name must be defined\")\n # create rospack instance if no cached value is available or for custom environments\n if not _rospack or env is not None:\n rospack = rospkg.RosPack(rospkg.get_ros_paths(env=env))\n # cache rospack instance for default environment\n if env is None:\n _rospack = rospack\n else:\n rospack = _rospack\n \n # - Construct rosrun command\n remap_args = [\"%s:=%s\"%(src,dst) for src, dst in node.remap_args]\n resolve_dict = {}\n\n #resolve args evaluates substitution commands\n #shlex parses a command string into a list of args\n # - for the local process args, we *do* resolve the anon tag so that the user can execute\n # - the node name and args must be resolved together in case the args refer to the anon node name\n (node_name) = substitution_args.resolve_args((node.name), context=resolve_dict, resolve_anon=True)\n node.name = node_name\n remap_args.append('__name:=%s'%node_name)\n \n resolved = substitution_args.resolve_args(node.args, context=resolve_dict, resolve_anon=True)\n try:\n if type(resolved) == unicode:\n resolved = resolved.encode('UTF-8') #attempt to force to string for shlex/subprocess\n except NameError:\n pass\n args = shlex.split(resolved) + remap_args\n try:\n #TODO:fuerte: pass through rospack and catkin cache\n matches = roslib.packages.find_node(node.package, node.type, rospack=rospack)\n except rospkg.ResourceNotFound as e:\n # multiple nodes, invalid package\n raise NodeParamsException(str(e))\n if not matches:\n raise NodeParamsException(\"can't locate node [%s] in package [%s]\"%(node.type, node.package))\n else:\n # old behavior was to take first, do we want to change this in Fuerte-style?\n cmd = matches[0]\n if not cmd:\n raise NodeParamsException(\"Cannot locate node of type [%s] in package [%s]\"%(node.type, node.package))\n cmd = [cmd]\n if sys.platform in ['win32']:\n if os.path.splitext(cmd[0])[1] == '.py':\n cmd = ['python'] + cmd\n return _launch_prefix_args(node) + cmd + args\n", "path": "tools/roslaunch/src/roslaunch/node_args.py"}], "after_files": [{"content": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2009, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. 
nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Revision $Id$\n\nfrom __future__ import print_function\n\n\"\"\"\nUtility module of roslaunch that computes the command-line arguments\nfor a node.\n\"\"\"\n\nimport logging\nimport os\nimport shlex\nimport sys\nimport time\n\nimport rospkg\nimport rosgraph\nimport rosgraph.names\nfrom rosgraph.names import script_resolve_name\n\nimport roslib.packages\n\nfrom . import substitution_args\n\nfrom roslaunch.core import setup_env, local_machine, RLException\nfrom roslaunch.config import load_config_default\nimport roslaunch.xmlloader\n\nclass NodeParamsException(Exception):\n \"\"\"\n Exception to indicate that node parameters were invalid\n \"\"\"\n pass\n\ndef get_node_list(config):\n \"\"\"\n @param config: roslaunch config\n @type config: ROSLaunchConfig\n @return: list of node names in config\n @rtype: [str]\n \"\"\"\n l = [_resolved_name(node) for node in config.nodes] + [_resolved_name(test) for test in config.tests]\n # filter out unnamed nodes\n return [x for x in l if x]\n\ndef print_node_list(roslaunch_files):\n \"\"\"\n Print list of nodes to screen. Will cause system exit if exception\n occurs. This is a subroutine for the roslaunch main handler.\n\n @param roslaunch_files: list of launch files to load\n @type roslaunch_files: str\n \"\"\"\n try:\n loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)\n config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)\n node_list = get_node_list(config)\n print('\\n'.join(node_list))\n except RLException as e:\n print(str(e), file=sys.stderr)\n sys.exit(1)\n\ndef print_node_args(node_name, roslaunch_files):\n \"\"\"\n Print arguments of node to screen. Will cause system exit if\n exception occurs. 
This is a subroutine for the roslaunch main\n handler.\n \n @param node_name: node name\n @type node_name: str\n @param roslaunch_files: list of launch files to load\n @type roslaunch_files: str\n \"\"\"\n try:\n node_name = script_resolve_name('roslaunch', node_name)\n args = get_node_args(node_name, roslaunch_files)\n print(' '.join(args))\n except RLException as e:\n print(str(e), file=sys.stderr)\n sys.exit(1)\n \ndef _resolved_name(node):\n if node.name:\n # $(anon id) passthrough\n if node.name.startswith('$'):\n return node.name\n else:\n return rosgraph.names.ns_join(node.namespace, node.name)\n else:\n return None\n\ndef print_node_filename(node_name, roslaunch_files):\n try:\n # #2309\n node_name = script_resolve_name('roslaunch', node_name)\n \n loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)\n config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)\n nodes = [n for n in config.nodes if _resolved_name(n) == node_name] + \\\n [t for t in config.tests if _resolved_name(t) == node_name]\n\n if len(nodes) > 1:\n raise RLException(\"ERROR: multiple nodes named [%s] in [%s].\\nPlease fix the launch files as duplicate names are not allowed.\"%(node_name, ', '.join(roslaunch_files)))\n if not nodes:\n print('ERROR: cannot find node named [%s]. Run \\n\\troslaunch --nodes <files>\\nto see list of node names.' % (node_name), file=sys.stderr)\n else:\n print(nodes[0].filename)\n \n except RLException as e:\n print(str(e), file=sys.stderr)\n sys.exit(1)\n\ndef get_node_args(node_name, roslaunch_files):\n \"\"\"\n Get the node arguments for a node in roslaunch_files. \n\n @param node_name: name of node in roslaunch_files.\n @type node_name: str\n @param roslaunch_files: roslaunch file names\n @type roslaunch_files: [str]\n @return: list of command-line arguments used to launch node_name\n @rtype: [str]\n @raise RLException: if node args cannot be retrieved\n \"\"\"\n \n # we have to create our own XmlLoader so that we can use the same\n # resolution context for substitution args\n\n loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)\n config = load_config_default(roslaunch_files, None, loader=loader, verbose=False, assign_machines=False)\n (node_name) = substitution_args.resolve_args((node_name), resolve_anon=False)\n node_name = script_resolve_name('roslaunch', node_name) if not node_name.startswith('$') else node_name\n \n node = [n for n in config.nodes if _resolved_name(n) == node_name] + \\\n [n for n in config.tests if _resolved_name(n) == node_name]\n if not node:\n node_list = get_node_list(config)\n node_list_str = '\\n'.join([\" * %s\"%x for x in node_list])\n raise RLException(\"ERROR: Cannot find node named [%s] in [%s].\\nNode names are:\\n%s\"%(node_name, ', '.join(roslaunch_files), node_list_str))\n elif len(node) > 1:\n raise RLException(\"ERROR: multiple nodes named [%s] in [%s].\\nPlease fix the launch files as duplicate names are not allowed.\"%(node_name, ', '.join(roslaunch_files)))\n node = node[0]\n \n master_uri = rosgraph.get_master_uri()\n machine = local_machine()\n env = setup_env(node, machine, master_uri)\n\n # remove setting identical to current environment for easier debugging\n to_remove = []\n for k in env.keys():\n if env[k] == os.environ.get(k, None):\n to_remove.append(k)\n for k in to_remove:\n del env[k]\n\n # resolve node name for generating args\n args = create_local_process_args(node, machine)\n # join environment vars are bash prefix args\n return [\"%s=%s\"%(k, v) for k, v in 
env.items()] + args\n \ndef _launch_prefix_args(node):\n if node.launch_prefix:\n prefix = node.launch_prefix\n try:\n if type(prefix) == unicode:\n prefix = prefix.encode('UTF-8')\n except NameError:\n pass\n return shlex.split(prefix)\n else:\n return []\n\n_rospack = None\n\n\ndef create_local_process_args(node, machine, env=None):\n \"\"\"\n Subroutine for creating node arguments.\n\n :param env: override os.environ. Warning, this does not override\n substitution args in node configuration (for now), ``dict``\n :returns: arguments for node process, ``[str]``\n :raises: :exc:`NodeParamsException` If args cannot be constructed for Node\n as specified (e.g. the node type does not exist)\n \"\"\"\n global _rospack\n if not node.name:\n raise ValueError(\"node name must be defined\")\n # create rospack instance if no cached value is available or for custom environments\n if not _rospack or env is not None:\n rospack = rospkg.RosPack(rospkg.get_ros_paths(env=env))\n # cache rospack instance for default environment\n if env is None:\n _rospack = rospack\n else:\n rospack = _rospack\n \n # - Construct rosrun command\n remap_args = [\"%s:=%s\"%(src,dst) for src, dst in node.remap_args]\n resolve_dict = {}\n\n #resolve args evaluates substitution commands\n #shlex parses a command string into a list of args\n # - for the local process args, we *do* resolve the anon tag so that the user can execute\n # - the node name and args must be resolved together in case the args refer to the anon node name\n (node_name) = substitution_args.resolve_args((node.name), context=resolve_dict, resolve_anon=True)\n node.name = node_name\n remap_args.append('__name:=%s'%node_name)\n \n resolved = substitution_args.resolve_args(node.args, context=resolve_dict, resolve_anon=True)\n try:\n if type(resolved) == unicode:\n resolved = resolved.encode('UTF-8') #attempt to force to string for shlex/subprocess\n except NameError:\n pass\n args = shlex.split(resolved) + remap_args\n try:\n #TODO:fuerte: pass through rospack and catkin cache\n matches = roslib.packages.find_node(node.package, node.type, rospack=rospack)\n except rospkg.ResourceNotFound as e:\n # multiple nodes, invalid package\n raise NodeParamsException(str(e))\n if not matches:\n raise NodeParamsException(\"Cannot locate node of type [%s] in package [%s]. Make sure file exists in package path and permission is set to executable (chmod +x)\"%(node.type, node.package))\n else:\n # old behavior was to take first, do we want to change this in Fuerte-style?\n cmd = matches[0]\n if not cmd:\n raise NodeParamsException(\"Cannot locate node of type [%s] in package [%s]\"%(node.type, node.package))\n cmd = [cmd]\n if sys.platform in ['win32']:\n if os.path.splitext(cmd[0])[1] == '.py':\n cmd = ['python'] + cmd\n return _launch_prefix_args(node) + cmd + args\n", "path": "tools/roslaunch/src/roslaunch/node_args.py"}]} | 3,870 | 200 |
gh_patches_debug_51453 | rasdani/github-patches | git_diff | lutris__lutris-402 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Running game changes to default X cursor in main window

I think [this is the code](https://github.com/lutris/lutris/blob/cbe6f5d5d982543a66f95aa62f0d8e26dd462470/lutris/util/display.py#L10) that sets the cursor. This was introduced in the commit https://github.com/lutris/lutris/commit/4bb2d78e5c3626057f9dc10dcc27f74ae67853b9
Right now it's using [GdkCursorType](https://developer.gnome.org/gdk3/stable/gdk3-Cursors.html#GdkCursorType) to set cursor, but it seems like we should be using [gdk_cursor_new_from_name()](https://developer.gnome.org/gdk3/stable/gdk3-Cursors.html#gdk-cursor-new-from-name) instead which is the recommended way.
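
A rough sketch of what the named-cursor variant of `set_cursor()` could look like with PyGObject (assuming GTK 3; this just illustrates the recommended `gdk_cursor_new_from_name()` approach):

```python
import gi
gi.require_version("Gdk", "3.0")
from gi.repository import Gdk


def set_cursor(name, window, display=None):
    """Set a named mouse cursor ('default', 'wait', ...) on the given Gdk window."""
    if not display:
        display = Gdk.Display.get_default()
    # new_from_name returns None if the cursor theme has no cursor with that name.
    cursor = Gdk.Cursor.new_from_name(display, name)
    window.set_cursor(cursor)
```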
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/display.py`
Content:
```
1 import subprocess
2 from gi.repository import Gdk
3
4 from lutris.util.log import logger
5
6
7 def set_cursor(name, window, display=None):
8 """Set a named mouse cursor for the given window."""
9 cursors = {
10 'default': Gdk.CursorType.ARROW,
11 'wait': Gdk.CursorType.WATCH,
12 }
13
14 if not display:
15 display = Gdk.Display.get_default()
16 cursor = Gdk.Cursor.new_for_display(display, cursors[name])
17 window.set_cursor(cursor)
18
19
20 def get_vidmodes():
21 xrandr_output = subprocess.Popen(["xrandr"],
22 stdout=subprocess.PIPE).communicate()[0]
23 return list([line for line in xrandr_output.decode().split("\n")])
24
25
26 def get_outputs():
27 """Return list of tuples containing output name and geometry."""
28 outputs = []
29 vid_modes = get_vidmodes()
30 if not vid_modes:
31 logger.error("xrandr didn't return anything")
32 return []
33 for line in vid_modes:
34 parts = line.split()
35 if len(parts) < 2:
36 continue
37 if parts[1] == 'connected':
38 if len(parts) == 2:
39 continue
40 geom = parts[2] if parts[2] != 'primary' else parts[3]
41 if geom.startswith('('): # Screen turned off, no geometry
42 continue
43 outputs.append((parts[0], geom))
44 return outputs
45
46
47 def get_output_names():
48 return [output[0] for output in get_outputs()]
49
50
51 def turn_off_except(display):
52 for output in get_outputs():
53 if output[0] != display:
54 subprocess.Popen(["xrandr", "--output", output[0], "--off"])
55
56
57 def get_resolutions():
58 """Return the list of supported screen resolutions."""
59 resolution_list = []
60 for line in get_vidmodes():
61 if line.startswith(" "):
62 resolution_list.append(line.split()[0])
63 return resolution_list
64
65
66 def get_current_resolution(monitor=0):
67 """Return the current resolution for the desktop."""
68 resolution = list()
69 for line in get_vidmodes():
70 if line.startswith(" ") and "*" in line:
71 resolution.append(line.split()[0])
72 if monitor == 'all':
73 return resolution
74 else:
75 return resolution[monitor]
76
77
78 def change_resolution(resolution):
79 """Change display resolution.
80
81 Takes a string for single monitors or a list of displays as returned
82 by get_outputs().
83 """
84 if not resolution:
85 logger.warning("No resolution provided")
86 return
87 if isinstance(resolution, str):
88 logger.debug("Switching resolution to %s", resolution)
89
90 if resolution not in get_resolutions():
91 logger.warning("Resolution %s doesn't exist." % resolution)
92 else:
93 subprocess.Popen(["xrandr", "-s", resolution])
94 else:
95 for display in resolution:
96 display_name = display[0]
97 logger.debug("Switching to %s on %s", display[1], display[0])
98 display_geom = display[1].split('+')
99 display_resolution = display_geom[0]
100 position = (display_geom[1], display_geom[2])
101
102 subprocess.Popen([
103 "xrandr",
104 "--output", display_name,
105 "--mode", display_resolution,
106 "--pos", "{}x{}".format(position[0], position[1])
107 ]).communicate()
108
109
110 def restore_gamma():
111 """Restores gamma to a normal level."""
112 subprocess.Popen(["xgamma", "-gamma", "1.0"])
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/util/display.py b/lutris/util/display.py
--- a/lutris/util/display.py
+++ b/lutris/util/display.py
@@ -6,14 +6,10 @@
def set_cursor(name, window, display=None):
"""Set a named mouse cursor for the given window."""
- cursors = {
- 'default': Gdk.CursorType.ARROW,
- 'wait': Gdk.CursorType.WATCH,
- }
if not display:
display = Gdk.Display.get_default()
- cursor = Gdk.Cursor.new_for_display(display, cursors[name])
+ cursor = Gdk.Cursor.new_from_name(display, name)
window.set_cursor(cursor)
| {"golden_diff": "diff --git a/lutris/util/display.py b/lutris/util/display.py\n--- a/lutris/util/display.py\n+++ b/lutris/util/display.py\n@@ -6,14 +6,10 @@\n \n def set_cursor(name, window, display=None):\n \"\"\"Set a named mouse cursor for the given window.\"\"\"\n- cursors = {\n- 'default': Gdk.CursorType.ARROW,\n- 'wait': Gdk.CursorType.WATCH,\n- }\n \n if not display:\n display = Gdk.Display.get_default()\n- cursor = Gdk.Cursor.new_for_display(display, cursors[name])\n+ cursor = Gdk.Cursor.new_from_name(display, name)\n window.set_cursor(cursor)\n", "issue": "Running game changes to default X cursor in main window\n\n\nI think [this is the code](https://github.com/lutris/lutris/blob/cbe6f5d5d982543a66f95aa62f0d8e26dd462470/lutris/util/display.py#L10) that sets the cursor. This was introduced in the commit https://github.com/lutris/lutris/commit/4bb2d78e5c3626057f9dc10dcc27f74ae67853b9\n\nRight now it's using [GdkCursorType](https://developer.gnome.org/gdk3/stable/gdk3-Cursors.html#GdkCursorType) to set cursor, but it seems like we should be using [gdk_cursor_new_from_name()](https://developer.gnome.org/gdk3/stable/gdk3-Cursors.html#gdk-cursor-new-from-name) instead which is the recommended way.\n\n", "before_files": [{"content": "import subprocess\nfrom gi.repository import Gdk\n\nfrom lutris.util.log import logger\n\n\ndef set_cursor(name, window, display=None):\n \"\"\"Set a named mouse cursor for the given window.\"\"\"\n cursors = {\n 'default': Gdk.CursorType.ARROW,\n 'wait': Gdk.CursorType.WATCH,\n }\n\n if not display:\n display = Gdk.Display.get_default()\n cursor = Gdk.Cursor.new_for_display(display, cursors[name])\n window.set_cursor(cursor)\n\n\ndef get_vidmodes():\n xrandr_output = subprocess.Popen([\"xrandr\"],\n stdout=subprocess.PIPE).communicate()[0]\n return list([line for line in xrandr_output.decode().split(\"\\n\")])\n\n\ndef get_outputs():\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n for line in vid_modes:\n parts = line.split()\n if len(parts) < 2:\n continue\n if parts[1] == 'connected':\n if len(parts) == 2:\n continue\n geom = parts[2] if parts[2] != 'primary' else parts[3]\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n outputs.append((parts[0], geom))\n return outputs\n\n\ndef get_output_names():\n return [output[0] for output in get_outputs()]\n\n\ndef turn_off_except(display):\n for output in get_outputs():\n if output[0] != display:\n subprocess.Popen([\"xrandr\", \"--output\", output[0], \"--off\"])\n\n\ndef get_resolutions():\n \"\"\"Return the list of supported screen resolutions.\"\"\"\n resolution_list = []\n for line in get_vidmodes():\n if line.startswith(\" \"):\n resolution_list.append(line.split()[0])\n return resolution_list\n\n\ndef get_current_resolution(monitor=0):\n \"\"\"Return the current resolution for the desktop.\"\"\"\n resolution = list()\n for line in get_vidmodes():\n if line.startswith(\" \") and \"*\" in line:\n resolution.append(line.split()[0])\n if monitor == 'all':\n return resolution\n else:\n return resolution[monitor]\n\n\ndef change_resolution(resolution):\n \"\"\"Change display resolution.\n\n Takes a string for single monitors or a list of displays as returned\n by get_outputs().\n \"\"\"\n if not resolution:\n logger.warning(\"No resolution provided\")\n return\n if isinstance(resolution, str):\n logger.debug(\"Switching resolution to 
%s\", resolution)\n\n if resolution not in get_resolutions():\n logger.warning(\"Resolution %s doesn't exist.\" % resolution)\n else:\n subprocess.Popen([\"xrandr\", \"-s\", resolution])\n else:\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n display_geom = display[1].split('+')\n display_resolution = display_geom[0]\n position = (display_geom[1], display_geom[2])\n\n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n \"--mode\", display_resolution,\n \"--pos\", \"{}x{}\".format(position[0], position[1])\n ]).communicate()\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n subprocess.Popen([\"xgamma\", \"-gamma\", \"1.0\"])\n", "path": "lutris/util/display.py"}], "after_files": [{"content": "import subprocess\nfrom gi.repository import Gdk\n\nfrom lutris.util.log import logger\n\n\ndef set_cursor(name, window, display=None):\n \"\"\"Set a named mouse cursor for the given window.\"\"\"\n\n if not display:\n display = Gdk.Display.get_default()\n cursor = Gdk.Cursor.new_from_name(display, name)\n window.set_cursor(cursor)\n\n\ndef get_vidmodes():\n xrandr_output = subprocess.Popen([\"xrandr\"],\n stdout=subprocess.PIPE).communicate()[0]\n return list([line for line in xrandr_output.decode().split(\"\\n\")])\n\n\ndef get_outputs():\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n for line in vid_modes:\n parts = line.split()\n if len(parts) < 2:\n continue\n if parts[1] == 'connected':\n if len(parts) == 2:\n continue\n geom = parts[2] if parts[2] != 'primary' else parts[3]\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n outputs.append((parts[0], geom))\n return outputs\n\n\ndef get_output_names():\n return [output[0] for output in get_outputs()]\n\n\ndef turn_off_except(display):\n for output in get_outputs():\n if output[0] != display:\n subprocess.Popen([\"xrandr\", \"--output\", output[0], \"--off\"])\n\n\ndef get_resolutions():\n \"\"\"Return the list of supported screen resolutions.\"\"\"\n resolution_list = []\n for line in get_vidmodes():\n if line.startswith(\" \"):\n resolution_list.append(line.split()[0])\n return resolution_list\n\n\ndef get_current_resolution(monitor=0):\n \"\"\"Return the current resolution for the desktop.\"\"\"\n resolution = list()\n for line in get_vidmodes():\n if line.startswith(\" \") and \"*\" in line:\n resolution.append(line.split()[0])\n if monitor == 'all':\n return resolution\n else:\n return resolution[monitor]\n\n\ndef change_resolution(resolution):\n \"\"\"Change display resolution.\n\n Takes a string for single monitors or a list of displays as returned\n by get_outputs().\n \"\"\"\n if not resolution:\n logger.warning(\"No resolution provided\")\n return\n if isinstance(resolution, str):\n logger.debug(\"Switching resolution to %s\", resolution)\n\n if resolution not in get_resolutions():\n logger.warning(\"Resolution %s doesn't exist.\" % resolution)\n else:\n subprocess.Popen([\"xrandr\", \"-s\", resolution])\n else:\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n display_geom = display[1].split('+')\n display_resolution = display_geom[0]\n position = (display_geom[1], display_geom[2])\n\n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n \"--mode\", 
display_resolution,\n \"--pos\", \"{}x{}\".format(position[0], position[1])\n ]).communicate()\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n subprocess.Popen([\"xgamma\", \"-gamma\", \"1.0\"])\n", "path": "lutris/util/display.py"}]} | 1,488 | 150 |
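A minimal standalone sketch of the named-cursor API that the lutris patch above switches to. Assumptions: GTK 3 via PyGObject; the `None` guard is an addition here, since `Gdk.Cursor.new_from_name` returns `None` for cursor names the theme does not provide.

```python
import gi
gi.require_version("Gdk", "3.0")
from gi.repository import Gdk


def set_cursor(name, window, display=None):
    """Set a named mouse cursor ('default', 'wait', ...) on a Gdk.Window."""
    if not display:
        display = Gdk.Display.get_default()
    cursor = Gdk.Cursor.new_from_name(display, name)
    if cursor is not None:  # new_from_name returns None for unknown names
        window.set_cursor(cursor)
```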
gh_patches_debug_21203 | rasdani/github-patches | git_diff | akvo__akvo-rsr-4237 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show project's locations in EUTF Syria custom project report.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please
7 see < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 from akvo.rsr.models import Project, Country, Organisation, IndicatorPeriod
11 from akvo.rsr.staticmap import get_staticmap_url, Coordinate, Size
12 from datetime import datetime
13 from django.contrib.auth.decorators import login_required
14 from django.db.models import Q
15 from django.http import HttpResponse, HttpResponseBadRequest
16 from django.shortcuts import get_object_or_404
17 from django.template.loader import render_to_string
18
19 from . import utils
20
21
22 @login_required
23 def render_organisation_projects_results_indicators_map_overview(request, org_id):
24 country = request.GET.get('country', '').strip()
25 if not country:
26 return HttpResponseBadRequest('Please provide the country code!')
27
28 show_comment = True if request.GET.get('comment', '').strip() == 'true' else False
29 start_date = utils.parse_date(request.GET.get('start_date', '').strip(), datetime(1900, 1, 1))
30 end_date = utils.parse_date(request.GET.get('end_date', '').strip(), datetime(2999, 12, 31))
31
32 country = get_object_or_404(Country, iso_code=country)
33 organisation = get_object_or_404(
34 Organisation.objects.prefetch_related(
35 'projects',
36 'projects__results',
37 'projects__results__indicators',
38 'projects__results__indicators__periods'
39 ),
40 pk=org_id
41 )
42 projects = organisation.all_projects().filter(primary_location__country=country)
43 coordinates = [
44 Coordinate(p.primary_location.latitude, p.primary_location.longitude)
45 for p
46 in projects
47 if p.primary_location
48 ]
49
50 now = datetime.today()
51
52 html = render_to_string(
53 'reports/organisation-projects-results-indicators-map-overview.html',
54 context={
55 'title': 'Results and indicators overview for projects in {}'.format(country.name),
56 'staticmap': get_staticmap_url(coordinates, Size(900, 600)),
57 'projects': [build_view_object(p, start_date, end_date) for p in projects],
58 'show_comment': show_comment,
59 'today': now.strftime('%d-%b-%Y'),
60 }
61 )
62
63 if request.GET.get('show-html', ''):
64 return HttpResponse(html)
65
66 filename = '{}-{}-{}-projects-results-indicators-overview.pdf'.format(
67 now.strftime('%Y%b%d'), organisation.id, country.iso_code
68 )
69
70 return utils.make_pdf_response(html, filename)
71
72
73 @login_required
74 def render_project_results_indicators_map_overview(request, project_id):
75 return _render_project_report(request, project_id, with_map=True)
76
77
78 @login_required
79 def render_project_results_indicators_overview(request, project_id):
80 return _render_project_report(request, project_id, with_disaggregation=True)
81
82
83 def build_view_object(project, start_date=None, end_date=None):
84 results = {}
85 periods = IndicatorPeriod.objects\
86 .select_related('indicator', 'indicator__result')\
87 .prefetch_related('disaggregations')\
88 .filter(indicator__result__project=project)
89 if start_date and end_date:
90 periods = periods.filter(
91 Q(period_start__isnull=True) | Q(period_start__gte=start_date),
92 Q(period_end__isnull=True) | Q(period_end__lte=end_date)
93 )
94 for period in periods:
95 indicator = period.indicator
96 result = indicator.result
97 if result.id not in results:
98 results[result.id] = {'item': result, 'indicators': {}}
99 if indicator.id not in results[result.id]['indicators']:
100 results[result.id]['indicators'][indicator.id] = {'item': indicator, 'periods': []}
101 results[result.id]['indicators'][indicator.id]['periods'].append(period)
102 return utils.ProjectProxy(project, results)
103
104
105 def _render_project_report(request, project_id, with_map=False, with_disaggregation=False):
106 show_comment = True if request.GET.get('comment', '').strip() == 'true' else False
107 start_date = utils.parse_date(request.GET.get('start_date', '').strip(), datetime(1900, 1, 1))
108 end_date = utils.parse_date(request.GET.get('end_date', '').strip(), datetime(2999, 12, 31))
109
110 project = get_object_or_404(
111 Project.objects.prefetch_related(
112 'partners',
113 'related_projects',
114 'related_to_projects',
115 'results',
116 'results__indicators',
117 'results__indicators__periods',
118 'results__indicators__periods__disaggregations'
119 ),
120 pk=project_id
121 )
122 project_location = project.primary_location
123 locations = [project_location]
124 if project.parents().count():
125 locations.append(project.parents().first().primary_location)
126 if project.children().count():
127 for child in project.children_all().published():
128 locations.append(child.primary_location)
129
130 if with_map:
131 coordinates = [Coordinate(loc.latitude, loc.longitude) for loc in locations if loc]
132
133 now = datetime.today()
134
135 html = render_to_string(
136 'reports/project-results-indicators-map-overview.html',
137 context={
138 'project': build_view_object(project, start_date, end_date),
139 'location': ", ".join([
140 _f
141 for _f
142 in [project_location.city, getattr(project_location.country, 'name', None)]
143 if _f
144 ]) if project_location else "",
145 'staticmap': get_staticmap_url(coordinates, Size(900, 600)) if with_map else None,
146 'show_comment': show_comment,
147 'show_disaggregations': with_disaggregation,
148 'today': now.strftime('%d-%b-%Y'),
149 }
150 )
151
152 if request.GET.get('show-html', ''):
153 return HttpResponse(html)
154
155 filename = '{}-{}-results-indicators{}-overview.pdf'.format(
156 now.strftime('%Y%b%d'), project.id, '-map' if with_map else '')
157
158 return utils.make_pdf_response(html, filename)
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py b/akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py
--- a/akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py
+++ b/akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py
@@ -109,6 +109,7 @@
project = get_object_or_404(
Project.objects.prefetch_related(
+ 'locations',
'partners',
'related_projects',
'related_to_projects',
@@ -120,14 +121,10 @@
pk=project_id
)
project_location = project.primary_location
- locations = [project_location]
- if project.parents().count():
- locations.append(project.parents().first().primary_location)
- if project.children().count():
- for child in project.children_all().published():
- locations.append(child.primary_location)
+ coordinates = None
if with_map:
+ locations = project.locations.all()
coordinates = [Coordinate(loc.latitude, loc.longitude) for loc in locations if loc]
now = datetime.today()
| {"golden_diff": "diff --git a/akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py b/akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py\n--- a/akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py\n+++ b/akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py\n@@ -109,6 +109,7 @@\n \n project = get_object_or_404(\n Project.objects.prefetch_related(\n+ 'locations',\n 'partners',\n 'related_projects',\n 'related_to_projects',\n@@ -120,14 +121,10 @@\n pk=project_id\n )\n project_location = project.primary_location\n- locations = [project_location]\n- if project.parents().count():\n- locations.append(project.parents().first().primary_location)\n- if project.children().count():\n- for child in project.children_all().published():\n- locations.append(child.primary_location)\n+ coordinates = None\n \n if with_map:\n+ locations = project.locations.all()\n coordinates = [Coordinate(loc.latitude, loc.longitude) for loc in locations if loc]\n \n now = datetime.today()\n", "issue": "Show project's locations in EUTF Syria custom project report.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import Project, Country, Organisation, IndicatorPeriod\nfrom akvo.rsr.staticmap import get_staticmap_url, Coordinate, Size\nfrom datetime import datetime\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404\nfrom django.template.loader import render_to_string\n\nfrom . 
import utils\n\n\n@login_required\ndef render_organisation_projects_results_indicators_map_overview(request, org_id):\n country = request.GET.get('country', '').strip()\n if not country:\n return HttpResponseBadRequest('Please provide the country code!')\n\n show_comment = True if request.GET.get('comment', '').strip() == 'true' else False\n start_date = utils.parse_date(request.GET.get('start_date', '').strip(), datetime(1900, 1, 1))\n end_date = utils.parse_date(request.GET.get('end_date', '').strip(), datetime(2999, 12, 31))\n\n country = get_object_or_404(Country, iso_code=country)\n organisation = get_object_or_404(\n Organisation.objects.prefetch_related(\n 'projects',\n 'projects__results',\n 'projects__results__indicators',\n 'projects__results__indicators__periods'\n ),\n pk=org_id\n )\n projects = organisation.all_projects().filter(primary_location__country=country)\n coordinates = [\n Coordinate(p.primary_location.latitude, p.primary_location.longitude)\n for p\n in projects\n if p.primary_location\n ]\n\n now = datetime.today()\n\n html = render_to_string(\n 'reports/organisation-projects-results-indicators-map-overview.html',\n context={\n 'title': 'Results and indicators overview for projects in {}'.format(country.name),\n 'staticmap': get_staticmap_url(coordinates, Size(900, 600)),\n 'projects': [build_view_object(p, start_date, end_date) for p in projects],\n 'show_comment': show_comment,\n 'today': now.strftime('%d-%b-%Y'),\n }\n )\n\n if request.GET.get('show-html', ''):\n return HttpResponse(html)\n\n filename = '{}-{}-{}-projects-results-indicators-overview.pdf'.format(\n now.strftime('%Y%b%d'), organisation.id, country.iso_code\n )\n\n return utils.make_pdf_response(html, filename)\n\n\n@login_required\ndef render_project_results_indicators_map_overview(request, project_id):\n return _render_project_report(request, project_id, with_map=True)\n\n\n@login_required\ndef render_project_results_indicators_overview(request, project_id):\n return _render_project_report(request, project_id, with_disaggregation=True)\n\n\ndef build_view_object(project, start_date=None, end_date=None):\n results = {}\n periods = IndicatorPeriod.objects\\\n .select_related('indicator', 'indicator__result')\\\n .prefetch_related('disaggregations')\\\n .filter(indicator__result__project=project)\n if start_date and end_date:\n periods = periods.filter(\n Q(period_start__isnull=True) | Q(period_start__gte=start_date),\n Q(period_end__isnull=True) | Q(period_end__lte=end_date)\n )\n for period in periods:\n indicator = period.indicator\n result = indicator.result\n if result.id not in results:\n results[result.id] = {'item': result, 'indicators': {}}\n if indicator.id not in results[result.id]['indicators']:\n results[result.id]['indicators'][indicator.id] = {'item': indicator, 'periods': []}\n results[result.id]['indicators'][indicator.id]['periods'].append(period)\n return utils.ProjectProxy(project, results)\n\n\ndef _render_project_report(request, project_id, with_map=False, with_disaggregation=False):\n show_comment = True if request.GET.get('comment', '').strip() == 'true' else False\n start_date = utils.parse_date(request.GET.get('start_date', '').strip(), datetime(1900, 1, 1))\n end_date = utils.parse_date(request.GET.get('end_date', '').strip(), datetime(2999, 12, 31))\n\n project = get_object_or_404(\n Project.objects.prefetch_related(\n 'partners',\n 'related_projects',\n 'related_to_projects',\n 'results',\n 'results__indicators',\n 'results__indicators__periods',\n 
'results__indicators__periods__disaggregations'\n ),\n pk=project_id\n )\n project_location = project.primary_location\n locations = [project_location]\n if project.parents().count():\n locations.append(project.parents().first().primary_location)\n if project.children().count():\n for child in project.children_all().published():\n locations.append(child.primary_location)\n\n if with_map:\n coordinates = [Coordinate(loc.latitude, loc.longitude) for loc in locations if loc]\n\n now = datetime.today()\n\n html = render_to_string(\n 'reports/project-results-indicators-map-overview.html',\n context={\n 'project': build_view_object(project, start_date, end_date),\n 'location': \", \".join([\n _f\n for _f\n in [project_location.city, getattr(project_location.country, 'name', None)]\n if _f\n ]) if project_location else \"\",\n 'staticmap': get_staticmap_url(coordinates, Size(900, 600)) if with_map else None,\n 'show_comment': show_comment,\n 'show_disaggregations': with_disaggregation,\n 'today': now.strftime('%d-%b-%Y'),\n }\n )\n\n if request.GET.get('show-html', ''):\n return HttpResponse(html)\n\n filename = '{}-{}-results-indicators{}-overview.pdf'.format(\n now.strftime('%Y%b%d'), project.id, '-map' if with_map else '')\n\n return utils.make_pdf_response(html, filename)\n", "path": "akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import Project, Country, Organisation, IndicatorPeriod\nfrom akvo.rsr.staticmap import get_staticmap_url, Coordinate, Size\nfrom datetime import datetime\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404\nfrom django.template.loader import render_to_string\n\nfrom . 
import utils\n\n\n@login_required\ndef render_organisation_projects_results_indicators_map_overview(request, org_id):\n country = request.GET.get('country', '').strip()\n if not country:\n return HttpResponseBadRequest('Please provide the country code!')\n\n show_comment = True if request.GET.get('comment', '').strip() == 'true' else False\n start_date = utils.parse_date(request.GET.get('start_date', '').strip(), datetime(1900, 1, 1))\n end_date = utils.parse_date(request.GET.get('end_date', '').strip(), datetime(2999, 12, 31))\n\n country = get_object_or_404(Country, iso_code=country)\n organisation = get_object_or_404(\n Organisation.objects.prefetch_related(\n 'projects',\n 'projects__results',\n 'projects__results__indicators',\n 'projects__results__indicators__periods'\n ),\n pk=org_id\n )\n projects = organisation.all_projects().filter(primary_location__country=country)\n coordinates = [\n Coordinate(p.primary_location.latitude, p.primary_location.longitude)\n for p\n in projects\n if p.primary_location\n ]\n\n now = datetime.today()\n\n html = render_to_string(\n 'reports/organisation-projects-results-indicators-map-overview.html',\n context={\n 'title': 'Results and indicators overview for projects in {}'.format(country.name),\n 'staticmap': get_staticmap_url(coordinates, Size(900, 600)),\n 'projects': [build_view_object(p, start_date, end_date) for p in projects],\n 'show_comment': show_comment,\n 'today': now.strftime('%d-%b-%Y'),\n }\n )\n\n if request.GET.get('show-html', ''):\n return HttpResponse(html)\n\n filename = '{}-{}-{}-projects-results-indicators-overview.pdf'.format(\n now.strftime('%Y%b%d'), organisation.id, country.iso_code\n )\n\n return utils.make_pdf_response(html, filename)\n\n\n@login_required\ndef render_project_results_indicators_map_overview(request, project_id):\n return _render_project_report(request, project_id, with_map=True)\n\n\n@login_required\ndef render_project_results_indicators_overview(request, project_id):\n return _render_project_report(request, project_id, with_disaggregation=True)\n\n\ndef build_view_object(project, start_date=None, end_date=None):\n results = {}\n periods = IndicatorPeriod.objects\\\n .select_related('indicator', 'indicator__result')\\\n .prefetch_related('disaggregations')\\\n .filter(indicator__result__project=project)\n if start_date and end_date:\n periods = periods.filter(\n Q(period_start__isnull=True) | Q(period_start__gte=start_date),\n Q(period_end__isnull=True) | Q(period_end__lte=end_date)\n )\n for period in periods:\n indicator = period.indicator\n result = indicator.result\n if result.id not in results:\n results[result.id] = {'item': result, 'indicators': {}}\n if indicator.id not in results[result.id]['indicators']:\n results[result.id]['indicators'][indicator.id] = {'item': indicator, 'periods': []}\n results[result.id]['indicators'][indicator.id]['periods'].append(period)\n return utils.ProjectProxy(project, results)\n\n\ndef _render_project_report(request, project_id, with_map=False, with_disaggregation=False):\n show_comment = True if request.GET.get('comment', '').strip() == 'true' else False\n start_date = utils.parse_date(request.GET.get('start_date', '').strip(), datetime(1900, 1, 1))\n end_date = utils.parse_date(request.GET.get('end_date', '').strip(), datetime(2999, 12, 31))\n\n project = get_object_or_404(\n Project.objects.prefetch_related(\n 'locations',\n 'partners',\n 'related_projects',\n 'related_to_projects',\n 'results',\n 'results__indicators',\n 'results__indicators__periods',\n 
'results__indicators__periods__disaggregations'\n ),\n pk=project_id\n )\n project_location = project.primary_location\n coordinates = None\n\n if with_map:\n locations = project.locations.all()\n coordinates = [Coordinate(loc.latitude, loc.longitude) for loc in locations if loc]\n\n now = datetime.today()\n\n html = render_to_string(\n 'reports/project-results-indicators-map-overview.html',\n context={\n 'project': build_view_object(project, start_date, end_date),\n 'location': \", \".join([\n _f\n for _f\n in [project_location.city, getattr(project_location.country, 'name', None)]\n if _f\n ]) if project_location else \"\",\n 'staticmap': get_staticmap_url(coordinates, Size(900, 600)) if with_map else None,\n 'show_comment': show_comment,\n 'show_disaggregations': with_disaggregation,\n 'today': now.strftime('%d-%b-%Y'),\n }\n )\n\n if request.GET.get('show-html', ''):\n return HttpResponse(html)\n\n filename = '{}-{}-results-indicators{}-overview.pdf'.format(\n now.strftime('%Y%b%d'), project.id, '-map' if with_map else '')\n\n return utils.make_pdf_response(html, filename)\n", "path": "akvo/rsr/views/py_reports/results_indicators_with_map_pdf_reports.py"}]} | 2,038 | 260 |
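A sketch of the coordinate-gathering pattern the akvo patch above adopts: read every location attached to the project instead of chaining `primary_location` through parents and children. Assumptions: a Django-style `project.locations` reverse manager whose rows carry `latitude`/`longitude`, as in the models the report code references; the filtering of empty coordinates is an addition.

```python
from collections import namedtuple

Coordinate = namedtuple("Coordinate", "latitude longitude")


def map_coordinates(project, with_map=True):
    """Collect static-map coordinates from all of a project's locations."""
    if not with_map:
        return None
    return [
        Coordinate(loc.latitude, loc.longitude)
        for loc in project.locations.all()
        if loc.latitude is not None and loc.longitude is not None
    ]
```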
gh_patches_debug_35621 | rasdani/github-patches | git_diff | streamlink__streamlink-5443 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.rtvs: No playable streams found on
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
rtvs plugin - stream not work
### Debug log
```text
PS C:\Users\My> streamlink https://www.rtvs.sk/televizia/live-24 --loglevel debug
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.2
[cli][debug] Streamlink: 5.3.1
[cli][debug] Dependencies:
[cli][debug] certifi: 2022.12.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.2
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.17
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.2
[cli][debug] urllib3: 1.26.14
[cli][debug] websocket-client: 1.5.1
[cli][debug] Arguments:
[cli][debug] url=https://www.rtvs.sk/televizia/live-24
[cli][debug] --loglevel=debug
[cli][debug] --ffmpeg-ffmpeg=C:\Program Files\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin rtvs for URL https://www.rtvs.sk/televizia/live-24
error: No playable streams found on this URL: https://www.rtvs.sk/televizia/live-24
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/rtvs.py`
Content:
```
1 """
2 $description Live TV channels from RTVS, a Slovak public, state-owned broadcaster.
3 $url rtvs.sk
4 $type live
5 $region Slovakia
6 """
7
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.hls import HLSStream
13 from streamlink.utils.parse import parse_json
14
15
16 @pluginmatcher(re.compile(
17 r"https?://www\.rtvs\.sk/televizia/live-[\w-]+",
18 ))
19 class Rtvs(Plugin):
20 _re_channel_id = re.compile(r"'stream':\s*'live-(\d+)'")
21
22 def _get_streams(self):
23 res = self.session.http.get(self.url)
24 m = self._re_channel_id.search(res.text)
25 if not m:
26 return
27
28 res = self.session.http.get(
29 "https://www.rtvs.sk/json/live5f.json",
30 params={
31 "c": m.group(1),
32 "b": "mozilla",
33 "p": "win",
34 "f": "0",
35 "d": "1",
36 },
37 )
38 videos = parse_json(res.text, schema=validate.Schema({
39 "clip": {
40 "sources": [{
41 "src": validate.url(),
42 "type": str,
43 }],
44 }},
45 validate.get(("clip", "sources")),
46 validate.filter(lambda n: n["type"] == "application/x-mpegurl"),
47 ))
48 for video in videos:
49 yield from HLSStream.parse_variant_playlist(self.session, video["src"]).items()
50
51
52 __plugin__ = Rtvs
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/rtvs.py b/src/streamlink/plugins/rtvs.py
--- a/src/streamlink/plugins/rtvs.py
+++ b/src/streamlink/plugins/rtvs.py
@@ -6,47 +6,52 @@
"""
import re
+from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
-from streamlink.utils.parse import parse_json
@pluginmatcher(re.compile(
- r"https?://www\.rtvs\.sk/televizia/live-[\w-]+",
+ r"https?://www\.rtvs\.sk/televizia/(?:live-|sport)",
))
class Rtvs(Plugin):
- _re_channel_id = re.compile(r"'stream':\s*'live-(\d+)'")
-
def _get_streams(self):
- res = self.session.http.get(self.url)
- m = self._re_channel_id.search(res.text)
- if not m:
+ channel = self.session.http.get(self.url, schema=validate.Schema(
+ validate.parse_html(),
+ validate.xml_xpath_string(".//iframe[@id='player_live']//@src"),
+ validate.url(path=validate.startswith("/embed/live/")),
+ validate.transform(lambda embed: urlparse(embed).path[len("/embed/live/"):]),
+ ))
+ if not channel:
return
- res = self.session.http.get(
+ videos = self.session.http.get(
"https://www.rtvs.sk/json/live5f.json",
params={
- "c": m.group(1),
+ "c": channel,
"b": "mozilla",
"p": "win",
"f": "0",
"d": "1",
},
+ schema=validate.Schema(
+ validate.parse_json(),
+ {
+ "clip": {
+ "sources": [{
+ "src": validate.url(),
+ "type": str,
+ }],
+ },
+ },
+ validate.get(("clip", "sources")),
+ validate.filter(lambda n: n["type"] == "application/x-mpegurl"),
+ ),
)
- videos = parse_json(res.text, schema=validate.Schema({
- "clip": {
- "sources": [{
- "src": validate.url(),
- "type": str,
- }],
- }},
- validate.get(("clip", "sources")),
- validate.filter(lambda n: n["type"] == "application/x-mpegurl"),
- ))
for video in videos:
- yield from HLSStream.parse_variant_playlist(self.session, video["src"]).items()
+ return HLSStream.parse_variant_playlist(self.session, video["src"])
__plugin__ = Rtvs
| {"golden_diff": "diff --git a/src/streamlink/plugins/rtvs.py b/src/streamlink/plugins/rtvs.py\n--- a/src/streamlink/plugins/rtvs.py\n+++ b/src/streamlink/plugins/rtvs.py\n@@ -6,47 +6,52 @@\n \"\"\"\n \n import re\n+from urllib.parse import urlparse\n \n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import validate\n from streamlink.stream.hls import HLSStream\n-from streamlink.utils.parse import parse_json\n \n \n @pluginmatcher(re.compile(\n- r\"https?://www\\.rtvs\\.sk/televizia/live-[\\w-]+\",\n+ r\"https?://www\\.rtvs\\.sk/televizia/(?:live-|sport)\",\n ))\n class Rtvs(Plugin):\n- _re_channel_id = re.compile(r\"'stream':\\s*'live-(\\d+)'\")\n-\n def _get_streams(self):\n- res = self.session.http.get(self.url)\n- m = self._re_channel_id.search(res.text)\n- if not m:\n+ channel = self.session.http.get(self.url, schema=validate.Schema(\n+ validate.parse_html(),\n+ validate.xml_xpath_string(\".//iframe[@id='player_live']//@src\"),\n+ validate.url(path=validate.startswith(\"/embed/live/\")),\n+ validate.transform(lambda embed: urlparse(embed).path[len(\"/embed/live/\"):]),\n+ ))\n+ if not channel:\n return\n \n- res = self.session.http.get(\n+ videos = self.session.http.get(\n \"https://www.rtvs.sk/json/live5f.json\",\n params={\n- \"c\": m.group(1),\n+ \"c\": channel,\n \"b\": \"mozilla\",\n \"p\": \"win\",\n \"f\": \"0\",\n \"d\": \"1\",\n },\n+ schema=validate.Schema(\n+ validate.parse_json(),\n+ {\n+ \"clip\": {\n+ \"sources\": [{\n+ \"src\": validate.url(),\n+ \"type\": str,\n+ }],\n+ },\n+ },\n+ validate.get((\"clip\", \"sources\")),\n+ validate.filter(lambda n: n[\"type\"] == \"application/x-mpegurl\"),\n+ ),\n )\n- videos = parse_json(res.text, schema=validate.Schema({\n- \"clip\": {\n- \"sources\": [{\n- \"src\": validate.url(),\n- \"type\": str,\n- }],\n- }},\n- validate.get((\"clip\", \"sources\")),\n- validate.filter(lambda n: n[\"type\"] == \"application/x-mpegurl\"),\n- ))\n for video in videos:\n- yield from HLSStream.parse_variant_playlist(self.session, video[\"src\"]).items()\n+ return HLSStream.parse_variant_playlist(self.session, video[\"src\"])\n \n \n __plugin__ = Rtvs\n", "issue": "plugins.rtvs: No playable streams found on\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nrtvs plugin - stream not work\n\n### Debug log\n\n```text\nPS C:\\Users\\My> streamlink https://www.rtvs.sk/televizia/live-24 --loglevel debug\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.2\r\n[cli][debug] Streamlink: 5.3.1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2022.12.7\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.2\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.17\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.2\r\n[cli][debug] urllib3: 1.26.14\r\n[cli][debug] websocket-client: 1.5.1\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.rtvs.sk/televizia/live-24\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Program 
Files\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin rtvs for URL https://www.rtvs.sk/televizia/live-24\r\nerror: No playable streams found on this URL: https://www.rtvs.sk/televizia/live-24\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels from RTVS, a Slovak public, state-owned broadcaster.\n$url rtvs.sk\n$type live\n$region Slovakia\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.parse import parse_json\n\n\n@pluginmatcher(re.compile(\n r\"https?://www\\.rtvs\\.sk/televizia/live-[\\w-]+\",\n))\nclass Rtvs(Plugin):\n _re_channel_id = re.compile(r\"'stream':\\s*'live-(\\d+)'\")\n\n def _get_streams(self):\n res = self.session.http.get(self.url)\n m = self._re_channel_id.search(res.text)\n if not m:\n return\n\n res = self.session.http.get(\n \"https://www.rtvs.sk/json/live5f.json\",\n params={\n \"c\": m.group(1),\n \"b\": \"mozilla\",\n \"p\": \"win\",\n \"f\": \"0\",\n \"d\": \"1\",\n },\n )\n videos = parse_json(res.text, schema=validate.Schema({\n \"clip\": {\n \"sources\": [{\n \"src\": validate.url(),\n \"type\": str,\n }],\n }},\n validate.get((\"clip\", \"sources\")),\n validate.filter(lambda n: n[\"type\"] == \"application/x-mpegurl\"),\n ))\n for video in videos:\n yield from HLSStream.parse_variant_playlist(self.session, video[\"src\"]).items()\n\n\n__plugin__ = Rtvs\n", "path": "src/streamlink/plugins/rtvs.py"}], "after_files": [{"content": "\"\"\"\n$description Live TV channels from RTVS, a Slovak public, state-owned broadcaster.\n$url rtvs.sk\n$type live\n$region Slovakia\n\"\"\"\n\nimport re\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\n@pluginmatcher(re.compile(\n r\"https?://www\\.rtvs\\.sk/televizia/(?:live-|sport)\",\n))\nclass Rtvs(Plugin):\n def _get_streams(self):\n channel = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//iframe[@id='player_live']//@src\"),\n validate.url(path=validate.startswith(\"/embed/live/\")),\n validate.transform(lambda embed: urlparse(embed).path[len(\"/embed/live/\"):]),\n ))\n if not channel:\n return\n\n videos = self.session.http.get(\n \"https://www.rtvs.sk/json/live5f.json\",\n params={\n \"c\": channel,\n \"b\": \"mozilla\",\n \"p\": \"win\",\n \"f\": \"0\",\n \"d\": \"1\",\n },\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"clip\": {\n \"sources\": [{\n \"src\": validate.url(),\n \"type\": str,\n }],\n },\n },\n validate.get((\"clip\", \"sources\")),\n validate.filter(lambda n: n[\"type\"] == \"application/x-mpegurl\"),\n ),\n )\n for video in videos:\n return HLSStream.parse_variant_playlist(self.session, video[\"src\"])\n\n\n__plugin__ = Rtvs\n", "path": "src/streamlink/plugins/rtvs.py"}]} | 1,222 | 603 |
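A standalone sketch of the channel-id extraction the rtvs patch above performs inside its validate schema, written with plain `requests` + `lxml` purely for illustration. Assumptions: the page still embeds the stream in an `iframe` with id `player_live` whose `src` path starts with `/embed/live/`.

```python
from urllib.parse import urlparse

import lxml.html
import requests


def channel_from_page(url):
    """Return the live-channel id embedded in an rtvs.sk page, or None."""
    html = requests.get(url, timeout=10).text
    doc = lxml.html.fromstring(html)
    srcs = doc.xpath(".//iframe[@id='player_live']/@src")
    if not srcs:
        return None
    path = urlparse(srcs[0]).path
    prefix = "/embed/live/"
    return path[len(prefix):] if path.startswith(prefix) else None
```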
gh_patches_debug_19490 | rasdani/github-patches | git_diff | scrapy__scrapy-707 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scrapy engine can stop even if start_requests is not empty
Consider this example spider
``` python
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request
class DmozSpider(Spider):
name = "dmoz"
allowed_domains = ["dmoz.org"]
start_urls = [
"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
"http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
]
def start_requests(self):
for url in self.start_urls:
yield Request(
url,
#dont_filter=True
)
def parse(self, response):
self.log("parse %r" % response.url)
```
and run it with `CONCURRENT_REQUESTS=1` and `DupeFilter` enabled,
it will only visit `http://www.dmoz.org/Computers/Programming/Languages/Python/Books/`
Even with higher concurrency settings, it can happen that the spider is considered idle because the next request from `start_requests` iterator was filtered.
https://github.com/scrapy/scrapy/blob/master/scrapy/core/engine.py#L120 or https://github.com/scrapy/scrapy/blob/master/scrapy/core/engine.py#L155 seems to be missing a test on `slot.start_requests is None`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/core/engine.py`
Content:
```
1 """
2 This is the Scrapy engine which controls the Scheduler, Downloader and Spiders.
3
4 For more information see docs/topics/architecture.rst
5
6 """
7 import warnings
8 from time import time
9
10 from twisted.internet import defer
11 from twisted.python.failure import Failure
12
13 from scrapy import log, signals
14 from scrapy.core.downloader import Downloader
15 from scrapy.core.scraper import Scraper
16 from scrapy.exceptions import DontCloseSpider, ScrapyDeprecationWarning
17 from scrapy.http import Response, Request
18 from scrapy.utils.misc import load_object
19 from scrapy.utils.reactor import CallLaterOnce
20
21
22 class Slot(object):
23
24 def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
25 self.closing = False
26 self.inprogress = set() # requests in progress
27 self.start_requests = iter(start_requests)
28 self.close_if_idle = close_if_idle
29 self.nextcall = nextcall
30 self.scheduler = scheduler
31
32 def add_request(self, request):
33 self.inprogress.add(request)
34
35 def remove_request(self, request):
36 self.inprogress.remove(request)
37 self._maybe_fire_closing()
38
39 def close(self):
40 self.closing = defer.Deferred()
41 self._maybe_fire_closing()
42 return self.closing
43
44 def _maybe_fire_closing(self):
45 if self.closing and not self.inprogress:
46 if self.nextcall:
47 self.nextcall.cancel()
48 self.closing.callback(None)
49
50
51 class ExecutionEngine(object):
52
53 def __init__(self, crawler, spider_closed_callback):
54 self.crawler = crawler
55 self.settings = crawler.settings
56 self.signals = crawler.signals
57 self.logformatter = crawler.logformatter
58 self.slot = None
59 self.spider = None
60 self.running = False
61 self.paused = False
62 self.scheduler_cls = load_object(self.settings['SCHEDULER'])
63 self.downloader = Downloader(crawler)
64 self.scraper = Scraper(crawler)
65 self._concurrent_spiders = self.settings.getint('CONCURRENT_SPIDERS', 1)
66 if self._concurrent_spiders != 1:
67 warnings.warn("CONCURRENT_SPIDERS settings is deprecated, use " \
68 "Scrapyd max_proc config instead", ScrapyDeprecationWarning)
69 self._spider_closed_callback = spider_closed_callback
70
71 @defer.inlineCallbacks
72 def start(self):
73 """Start the execution engine"""
74 assert not self.running, "Engine already running"
75 self.start_time = time()
76 yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
77 self.running = True
78 self._closewait = defer.Deferred()
79 yield self._closewait
80
81 def stop(self):
82 """Stop the execution engine gracefully"""
83 assert self.running, "Engine not running"
84 self.running = False
85 dfd = self._close_all_spiders()
86 return dfd.addBoth(lambda _: self._finish_stopping_engine())
87
88 def pause(self):
89 """Pause the execution engine"""
90 self.paused = True
91
92 def unpause(self):
93 """Resume the execution engine"""
94 self.paused = False
95
96 def _next_request(self, spider):
97 slot = self.slot
98 if not slot:
99 return
100
101 if self.paused:
102 slot.nextcall.schedule(5)
103 return
104
105 while not self._needs_backout(spider):
106 if not self._next_request_from_scheduler(spider):
107 break
108
109 if slot.start_requests and not self._needs_backout(spider):
110 try:
111 request = next(slot.start_requests)
112 except StopIteration:
113 slot.start_requests = None
114 except Exception as exc:
115 log.err(None, 'Obtaining request from start requests', \
116 spider=spider)
117 else:
118 self.crawl(request, spider)
119
120 if self.spider_is_idle(spider) and slot.close_if_idle:
121 self._spider_idle(spider)
122
123 def _needs_backout(self, spider):
124 slot = self.slot
125 return not self.running \
126 or slot.closing \
127 or self.downloader.needs_backout() \
128 or self.scraper.slot.needs_backout()
129
130 def _next_request_from_scheduler(self, spider):
131 slot = self.slot
132 request = slot.scheduler.next_request()
133 if not request:
134 return
135 d = self._download(request, spider)
136 d.addBoth(self._handle_downloader_output, request, spider)
137 d.addErrback(log.msg, spider=spider)
138 d.addBoth(lambda _: slot.remove_request(request))
139 d.addErrback(log.msg, spider=spider)
140 d.addBoth(lambda _: slot.nextcall.schedule())
141 d.addErrback(log.msg, spider=spider)
142 return d
143
144 def _handle_downloader_output(self, response, request, spider):
145 assert isinstance(response, (Request, Response, Failure)), response
146 # downloader middleware can return requests (for example, redirects)
147 if isinstance(response, Request):
148 self.crawl(response, spider)
149 return
150 # response is a Response or Failure
151 d = self.scraper.enqueue_scrape(response, request, spider)
152 d.addErrback(log.err, spider=spider)
153 return d
154
155 def spider_is_idle(self, spider):
156 scraper_idle = self.scraper.slot.is_idle()
157 pending = self.slot.scheduler.has_pending_requests()
158 downloading = bool(self.downloader.active)
159 idle = scraper_idle and not (pending or downloading)
160 return idle
161
162 @property
163 def open_spiders(self):
164 return [self.spider] if self.spider else []
165
166 def has_capacity(self):
167 """Does the engine have capacity to handle more spiders"""
168 return not bool(self.slot)
169
170 def crawl(self, request, spider):
171 assert spider in self.open_spiders, \
172 "Spider %r not opened when crawling: %s" % (spider.name, request)
173 self.schedule(request, spider)
174 self.slot.nextcall.schedule()
175
176 def schedule(self, request, spider):
177 self.signals.send_catch_log(signal=signals.request_scheduled,
178 request=request, spider=spider)
179 return self.slot.scheduler.enqueue_request(request)
180
181 def download(self, request, spider):
182 slot = self.slot
183 slot.add_request(request)
184 d = self._download(request, spider)
185 d.addBoth(self._downloaded, slot, request, spider)
186 return d
187
188 def _downloaded(self, response, slot, request, spider):
189 slot.remove_request(request)
190 return self.download(response, spider) \
191 if isinstance(response, Request) else response
192
193 def _download(self, request, spider):
194 slot = self.slot
195 slot.add_request(request)
196 def _on_success(response):
197 assert isinstance(response, (Response, Request))
198 if isinstance(response, Response):
199 response.request = request # tie request to response received
200 logkws = self.logformatter.crawled(request, response, spider)
201 log.msg(spider=spider, **logkws)
202 self.signals.send_catch_log(signal=signals.response_received, \
203 response=response, request=request, spider=spider)
204 return response
205
206 def _on_complete(_):
207 slot.nextcall.schedule()
208 return _
209
210 dwld = self.downloader.fetch(request, spider)
211 dwld.addCallbacks(_on_success)
212 dwld.addBoth(_on_complete)
213 return dwld
214
215 @defer.inlineCallbacks
216 def open_spider(self, spider, start_requests=(), close_if_idle=True):
217 assert self.has_capacity(), "No free spider slot when opening %r" % \
218 spider.name
219 log.msg("Spider opened", spider=spider)
220 nextcall = CallLaterOnce(self._next_request, spider)
221 scheduler = self.scheduler_cls.from_crawler(self.crawler)
222 start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)
223 slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
224 self.slot = slot
225 self.spider = spider
226 yield scheduler.open(spider)
227 yield self.scraper.open_spider(spider)
228 self.crawler.stats.open_spider(spider)
229 yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
230 slot.nextcall.schedule()
231
232 def _spider_idle(self, spider):
233 """Called when a spider gets idle. This function is called when there
234 are no remaining pages to download or schedule. It can be called
235 multiple times. If some extension raises a DontCloseSpider exception
236 (in the spider_idle signal handler) the spider is not closed until the
237 next loop and this function is guaranteed to be called (at least) once
238 again for this spider.
239 """
240 res = self.signals.send_catch_log(signal=signals.spider_idle, \
241 spider=spider, dont_log=DontCloseSpider)
242 if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \
243 for _, x in res):
244 self.slot.nextcall.schedule(5)
245 return
246
247 if self.spider_is_idle(spider):
248 self.close_spider(spider, reason='finished')
249
250 def close_spider(self, spider, reason='cancelled'):
251 """Close (cancel) spider and clear all its outstanding requests"""
252
253 slot = self.slot
254 if slot.closing:
255 return slot.closing
256 log.msg(format="Closing spider (%(reason)s)", reason=reason, spider=spider)
257
258 dfd = slot.close()
259
260 dfd.addBoth(lambda _: self.downloader.close())
261 dfd.addErrback(log.err, spider=spider)
262
263 dfd.addBoth(lambda _: self.scraper.close_spider(spider))
264 dfd.addErrback(log.err, spider=spider)
265
266 dfd.addBoth(lambda _: slot.scheduler.close(reason))
267 dfd.addErrback(log.err, spider=spider)
268
269 # XXX: spider_stats argument was added for backwards compatibility with
270 # stats collection refactoring added in 0.15. it should be removed in 0.17.
271 dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(signal=signals.spider_closed, \
272 spider=spider, reason=reason, spider_stats=self.crawler.stats.get_stats()))
273 dfd.addErrback(log.err, spider=spider)
274
275 dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
276 dfd.addErrback(log.err, spider=spider)
277
278 dfd.addBoth(lambda _: log.msg(format="Spider closed (%(reason)s)", reason=reason, spider=spider))
279
280 dfd.addBoth(lambda _: setattr(self, 'slot', None))
281 dfd.addErrback(log.err, spider=spider)
282
283 dfd.addBoth(lambda _: setattr(self, 'spider', None))
284 dfd.addErrback(log.err, spider=spider)
285
286 dfd.addBoth(lambda _: self._spider_closed_callback(spider))
287
288 return dfd
289
290 def _close_all_spiders(self):
291 dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
292 dlist = defer.DeferredList(dfds)
293 return dlist
294
295 @defer.inlineCallbacks
296 def _finish_stopping_engine(self):
297 yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)
298 self._closewait.callback(None)
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/core/engine.py b/scrapy/core/engine.py
--- a/scrapy/core/engine.py
+++ b/scrapy/core/engine.py
@@ -112,6 +112,7 @@
except StopIteration:
slot.start_requests = None
except Exception as exc:
+ slot.start_requests = None
log.err(None, 'Obtaining request from start requests', \
spider=spider)
else:
@@ -156,7 +157,8 @@
scraper_idle = self.scraper.slot.is_idle()
pending = self.slot.scheduler.has_pending_requests()
downloading = bool(self.downloader.active)
- idle = scraper_idle and not (pending or downloading)
+ pending_start_requests = self.slot.start_requests is not None
+ idle = scraper_idle and not (pending or downloading or pending_start_requests)
return idle
@property
| {"golden_diff": "diff --git a/scrapy/core/engine.py b/scrapy/core/engine.py\n--- a/scrapy/core/engine.py\n+++ b/scrapy/core/engine.py\n@@ -112,6 +112,7 @@\n except StopIteration:\n slot.start_requests = None\n except Exception as exc:\n+ slot.start_requests = None\n log.err(None, 'Obtaining request from start requests', \\\n spider=spider)\n else:\n@@ -156,7 +157,8 @@\n scraper_idle = self.scraper.slot.is_idle()\n pending = self.slot.scheduler.has_pending_requests()\n downloading = bool(self.downloader.active)\n- idle = scraper_idle and not (pending or downloading)\n+ pending_start_requests = self.slot.start_requests is not None\n+ idle = scraper_idle and not (pending or downloading or pending_start_requests)\n return idle\n \n @property\n", "issue": "scrapy engine can stop even if start_requests is not empty\nConsider this example spider\n\n``` python\nfrom scrapy.spider import Spider\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nclass DmozSpider(Spider):\n name = \"dmoz\"\n allowed_domains = [\"dmoz.org\"]\n start_urls = [\n \"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/\",\n \"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/\",\n \"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/\",\n \"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/\",\n \"http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/\"\n ]\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(\n url,\n #dont_filter=True\n )\n\n def parse(self, response):\n self.log(\"parse %r\" % response.url)\n\n```\n\nand run it with `CONCURRENT_REQUESTS=1` and `DupeFilter` enabled,\nit will only visit `http://www.dmoz.org/Computers/Programming/Languages/Python/Books/`\n\nEven with higher concurrency settings, it can happen that the spider is considered idle because the next request from `start_requests` iterator was filtered.\n\nhttps://github.com/scrapy/scrapy/blob/master/scrapy/core/engine.py#L120 or https://github.com/scrapy/scrapy/blob/master/scrapy/core/engine.py#L155 seems to be missing a test on `slot.start_requests is None`\n\n", "before_files": [{"content": "\"\"\"\nThis is the Scrapy engine which controls the Scheduler, Downloader and Spiders.\n\nFor more information see docs/topics/architecture.rst\n\n\"\"\"\nimport warnings\nfrom time import time\n\nfrom twisted.internet import defer\nfrom twisted.python.failure import Failure\n\nfrom scrapy import log, signals\nfrom scrapy.core.downloader import Downloader\nfrom scrapy.core.scraper import Scraper\nfrom scrapy.exceptions import DontCloseSpider, ScrapyDeprecationWarning\nfrom scrapy.http import Response, Request\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.reactor import CallLaterOnce\n\n\nclass Slot(object):\n\n def __init__(self, start_requests, close_if_idle, nextcall, scheduler):\n self.closing = False\n self.inprogress = set() # requests in progress\n self.start_requests = iter(start_requests)\n self.close_if_idle = close_if_idle\n self.nextcall = nextcall\n self.scheduler = scheduler\n\n def add_request(self, request):\n self.inprogress.add(request)\n\n def remove_request(self, request):\n self.inprogress.remove(request)\n self._maybe_fire_closing()\n\n def close(self):\n self.closing = defer.Deferred()\n self._maybe_fire_closing()\n return self.closing\n\n def _maybe_fire_closing(self):\n if self.closing and not self.inprogress:\n if self.nextcall:\n self.nextcall.cancel()\n self.closing.callback(None)\n\n\nclass 
ExecutionEngine(object):\n\n def __init__(self, crawler, spider_closed_callback):\n self.crawler = crawler\n self.settings = crawler.settings\n self.signals = crawler.signals\n self.logformatter = crawler.logformatter\n self.slot = None\n self.spider = None\n self.running = False\n self.paused = False\n self.scheduler_cls = load_object(self.settings['SCHEDULER'])\n self.downloader = Downloader(crawler)\n self.scraper = Scraper(crawler)\n self._concurrent_spiders = self.settings.getint('CONCURRENT_SPIDERS', 1)\n if self._concurrent_spiders != 1:\n warnings.warn(\"CONCURRENT_SPIDERS settings is deprecated, use \" \\\n \"Scrapyd max_proc config instead\", ScrapyDeprecationWarning)\n self._spider_closed_callback = spider_closed_callback\n\n @defer.inlineCallbacks\n def start(self):\n \"\"\"Start the execution engine\"\"\"\n assert not self.running, \"Engine already running\"\n self.start_time = time()\n yield self.signals.send_catch_log_deferred(signal=signals.engine_started)\n self.running = True\n self._closewait = defer.Deferred()\n yield self._closewait\n\n def stop(self):\n \"\"\"Stop the execution engine gracefully\"\"\"\n assert self.running, \"Engine not running\"\n self.running = False\n dfd = self._close_all_spiders()\n return dfd.addBoth(lambda _: self._finish_stopping_engine())\n\n def pause(self):\n \"\"\"Pause the execution engine\"\"\"\n self.paused = True\n\n def unpause(self):\n \"\"\"Resume the execution engine\"\"\"\n self.paused = False\n\n def _next_request(self, spider):\n slot = self.slot\n if not slot:\n return\n\n if self.paused:\n slot.nextcall.schedule(5)\n return\n\n while not self._needs_backout(spider):\n if not self._next_request_from_scheduler(spider):\n break\n\n if slot.start_requests and not self._needs_backout(spider):\n try:\n request = next(slot.start_requests)\n except StopIteration:\n slot.start_requests = None\n except Exception as exc:\n log.err(None, 'Obtaining request from start requests', \\\n spider=spider)\n else:\n self.crawl(request, spider)\n\n if self.spider_is_idle(spider) and slot.close_if_idle:\n self._spider_idle(spider)\n\n def _needs_backout(self, spider):\n slot = self.slot\n return not self.running \\\n or slot.closing \\\n or self.downloader.needs_backout() \\\n or self.scraper.slot.needs_backout()\n\n def _next_request_from_scheduler(self, spider):\n slot = self.slot\n request = slot.scheduler.next_request()\n if not request:\n return\n d = self._download(request, spider)\n d.addBoth(self._handle_downloader_output, request, spider)\n d.addErrback(log.msg, spider=spider)\n d.addBoth(lambda _: slot.remove_request(request))\n d.addErrback(log.msg, spider=spider)\n d.addBoth(lambda _: slot.nextcall.schedule())\n d.addErrback(log.msg, spider=spider)\n return d\n\n def _handle_downloader_output(self, response, request, spider):\n assert isinstance(response, (Request, Response, Failure)), response\n # downloader middleware can return requests (for example, redirects)\n if isinstance(response, Request):\n self.crawl(response, spider)\n return\n # response is a Response or Failure\n d = self.scraper.enqueue_scrape(response, request, spider)\n d.addErrback(log.err, spider=spider)\n return d\n\n def spider_is_idle(self, spider):\n scraper_idle = self.scraper.slot.is_idle()\n pending = self.slot.scheduler.has_pending_requests()\n downloading = bool(self.downloader.active)\n idle = scraper_idle and not (pending or downloading)\n return idle\n\n @property\n def open_spiders(self):\n return [self.spider] if self.spider else []\n\n def 
has_capacity(self):\n \"\"\"Does the engine have capacity to handle more spiders\"\"\"\n return not bool(self.slot)\n\n def crawl(self, request, spider):\n assert spider in self.open_spiders, \\\n \"Spider %r not opened when crawling: %s\" % (spider.name, request)\n self.schedule(request, spider)\n self.slot.nextcall.schedule()\n\n def schedule(self, request, spider):\n self.signals.send_catch_log(signal=signals.request_scheduled,\n request=request, spider=spider)\n return self.slot.scheduler.enqueue_request(request)\n\n def download(self, request, spider):\n slot = self.slot\n slot.add_request(request)\n d = self._download(request, spider)\n d.addBoth(self._downloaded, slot, request, spider)\n return d\n\n def _downloaded(self, response, slot, request, spider):\n slot.remove_request(request)\n return self.download(response, spider) \\\n if isinstance(response, Request) else response\n\n def _download(self, request, spider):\n slot = self.slot\n slot.add_request(request)\n def _on_success(response):\n assert isinstance(response, (Response, Request))\n if isinstance(response, Response):\n response.request = request # tie request to response received\n logkws = self.logformatter.crawled(request, response, spider)\n log.msg(spider=spider, **logkws)\n self.signals.send_catch_log(signal=signals.response_received, \\\n response=response, request=request, spider=spider)\n return response\n\n def _on_complete(_):\n slot.nextcall.schedule()\n return _\n\n dwld = self.downloader.fetch(request, spider)\n dwld.addCallbacks(_on_success)\n dwld.addBoth(_on_complete)\n return dwld\n\n @defer.inlineCallbacks\n def open_spider(self, spider, start_requests=(), close_if_idle=True):\n assert self.has_capacity(), \"No free spider slot when opening %r\" % \\\n spider.name\n log.msg(\"Spider opened\", spider=spider)\n nextcall = CallLaterOnce(self._next_request, spider)\n scheduler = self.scheduler_cls.from_crawler(self.crawler)\n start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)\n slot = Slot(start_requests, close_if_idle, nextcall, scheduler)\n self.slot = slot\n self.spider = spider\n yield scheduler.open(spider)\n yield self.scraper.open_spider(spider)\n self.crawler.stats.open_spider(spider)\n yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)\n slot.nextcall.schedule()\n\n def _spider_idle(self, spider):\n \"\"\"Called when a spider gets idle. This function is called when there\n are no remaining pages to download or schedule. It can be called\n multiple times. 
If some extension raises a DontCloseSpider exception\n (in the spider_idle signal handler) the spider is not closed until the\n next loop and this function is guaranteed to be called (at least) once\n again for this spider.\n \"\"\"\n res = self.signals.send_catch_log(signal=signals.spider_idle, \\\n spider=spider, dont_log=DontCloseSpider)\n if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \\\n for _, x in res):\n self.slot.nextcall.schedule(5)\n return\n\n if self.spider_is_idle(spider):\n self.close_spider(spider, reason='finished')\n\n def close_spider(self, spider, reason='cancelled'):\n \"\"\"Close (cancel) spider and clear all its outstanding requests\"\"\"\n\n slot = self.slot\n if slot.closing:\n return slot.closing\n log.msg(format=\"Closing spider (%(reason)s)\", reason=reason, spider=spider)\n\n dfd = slot.close()\n\n dfd.addBoth(lambda _: self.downloader.close())\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: self.scraper.close_spider(spider))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: slot.scheduler.close(reason))\n dfd.addErrback(log.err, spider=spider)\n\n # XXX: spider_stats argument was added for backwards compatibility with\n # stats collection refactoring added in 0.15. it should be removed in 0.17.\n dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(signal=signals.spider_closed, \\\n spider=spider, reason=reason, spider_stats=self.crawler.stats.get_stats()))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: log.msg(format=\"Spider closed (%(reason)s)\", reason=reason, spider=spider))\n\n dfd.addBoth(lambda _: setattr(self, 'slot', None))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: setattr(self, 'spider', None))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: self._spider_closed_callback(spider))\n\n return dfd\n\n def _close_all_spiders(self):\n dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]\n dlist = defer.DeferredList(dfds)\n return dlist\n\n @defer.inlineCallbacks\n def _finish_stopping_engine(self):\n yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)\n self._closewait.callback(None)\n", "path": "scrapy/core/engine.py"}], "after_files": [{"content": "\"\"\"\nThis is the Scrapy engine which controls the Scheduler, Downloader and Spiders.\n\nFor more information see docs/topics/architecture.rst\n\n\"\"\"\nimport warnings\nfrom time import time\n\nfrom twisted.internet import defer\nfrom twisted.python.failure import Failure\n\nfrom scrapy import log, signals\nfrom scrapy.core.downloader import Downloader\nfrom scrapy.core.scraper import Scraper\nfrom scrapy.exceptions import DontCloseSpider, ScrapyDeprecationWarning\nfrom scrapy.http import Response, Request\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.reactor import CallLaterOnce\n\n\nclass Slot(object):\n\n def __init__(self, start_requests, close_if_idle, nextcall, scheduler):\n self.closing = False\n self.inprogress = set() # requests in progress\n self.start_requests = iter(start_requests)\n self.close_if_idle = close_if_idle\n self.nextcall = nextcall\n self.scheduler = scheduler\n\n def add_request(self, request):\n self.inprogress.add(request)\n\n def remove_request(self, request):\n self.inprogress.remove(request)\n self._maybe_fire_closing()\n\n def close(self):\n self.closing = 
defer.Deferred()\n self._maybe_fire_closing()\n return self.closing\n\n def _maybe_fire_closing(self):\n if self.closing and not self.inprogress:\n if self.nextcall:\n self.nextcall.cancel()\n self.closing.callback(None)\n\n\nclass ExecutionEngine(object):\n\n def __init__(self, crawler, spider_closed_callback):\n self.crawler = crawler\n self.settings = crawler.settings\n self.signals = crawler.signals\n self.logformatter = crawler.logformatter\n self.slot = None\n self.spider = None\n self.running = False\n self.paused = False\n self.scheduler_cls = load_object(self.settings['SCHEDULER'])\n self.downloader = Downloader(crawler)\n self.scraper = Scraper(crawler)\n self._concurrent_spiders = self.settings.getint('CONCURRENT_SPIDERS', 1)\n if self._concurrent_spiders != 1:\n warnings.warn(\"CONCURRENT_SPIDERS settings is deprecated, use \" \\\n \"Scrapyd max_proc config instead\", ScrapyDeprecationWarning)\n self._spider_closed_callback = spider_closed_callback\n\n @defer.inlineCallbacks\n def start(self):\n \"\"\"Start the execution engine\"\"\"\n assert not self.running, \"Engine already running\"\n self.start_time = time()\n yield self.signals.send_catch_log_deferred(signal=signals.engine_started)\n self.running = True\n self._closewait = defer.Deferred()\n yield self._closewait\n\n def stop(self):\n \"\"\"Stop the execution engine gracefully\"\"\"\n assert self.running, \"Engine not running\"\n self.running = False\n dfd = self._close_all_spiders()\n return dfd.addBoth(lambda _: self._finish_stopping_engine())\n\n def pause(self):\n \"\"\"Pause the execution engine\"\"\"\n self.paused = True\n\n def unpause(self):\n \"\"\"Resume the execution engine\"\"\"\n self.paused = False\n\n def _next_request(self, spider):\n slot = self.slot\n if not slot:\n return\n\n if self.paused:\n slot.nextcall.schedule(5)\n return\n\n while not self._needs_backout(spider):\n if not self._next_request_from_scheduler(spider):\n break\n\n if slot.start_requests and not self._needs_backout(spider):\n try:\n request = next(slot.start_requests)\n except StopIteration:\n slot.start_requests = None\n except Exception as exc:\n slot.start_requests = None\n log.err(None, 'Obtaining request from start requests', \\\n spider=spider)\n else:\n self.crawl(request, spider)\n\n if self.spider_is_idle(spider) and slot.close_if_idle:\n self._spider_idle(spider)\n\n def _needs_backout(self, spider):\n slot = self.slot\n return not self.running \\\n or slot.closing \\\n or self.downloader.needs_backout() \\\n or self.scraper.slot.needs_backout()\n\n def _next_request_from_scheduler(self, spider):\n slot = self.slot\n request = slot.scheduler.next_request()\n if not request:\n return\n d = self._download(request, spider)\n d.addBoth(self._handle_downloader_output, request, spider)\n d.addErrback(log.msg, spider=spider)\n d.addBoth(lambda _: slot.remove_request(request))\n d.addErrback(log.msg, spider=spider)\n d.addBoth(lambda _: slot.nextcall.schedule())\n d.addErrback(log.msg, spider=spider)\n return d\n\n def _handle_downloader_output(self, response, request, spider):\n assert isinstance(response, (Request, Response, Failure)), response\n # downloader middleware can return requests (for example, redirects)\n if isinstance(response, Request):\n self.crawl(response, spider)\n return\n # response is a Response or Failure\n d = self.scraper.enqueue_scrape(response, request, spider)\n d.addErrback(log.err, spider=spider)\n return d\n\n def spider_is_idle(self, spider):\n scraper_idle = self.scraper.slot.is_idle()\n pending = 
self.slot.scheduler.has_pending_requests()\n downloading = bool(self.downloader.active)\n pending_start_requests = self.slot.start_requests is not None\n idle = scraper_idle and not (pending or downloading or pending_start_requests)\n return idle\n\n @property\n def open_spiders(self):\n return [self.spider] if self.spider else []\n\n def has_capacity(self):\n \"\"\"Does the engine have capacity to handle more spiders\"\"\"\n return not bool(self.slot)\n\n def crawl(self, request, spider):\n assert spider in self.open_spiders, \\\n \"Spider %r not opened when crawling: %s\" % (spider.name, request)\n self.schedule(request, spider)\n self.slot.nextcall.schedule()\n\n def schedule(self, request, spider):\n self.signals.send_catch_log(signal=signals.request_scheduled,\n request=request, spider=spider)\n return self.slot.scheduler.enqueue_request(request)\n\n def download(self, request, spider):\n slot = self.slot\n slot.add_request(request)\n d = self._download(request, spider)\n d.addBoth(self._downloaded, slot, request, spider)\n return d\n\n def _downloaded(self, response, slot, request, spider):\n slot.remove_request(request)\n return self.download(response, spider) \\\n if isinstance(response, Request) else response\n\n def _download(self, request, spider):\n slot = self.slot\n slot.add_request(request)\n def _on_success(response):\n assert isinstance(response, (Response, Request))\n if isinstance(response, Response):\n response.request = request # tie request to response received\n logkws = self.logformatter.crawled(request, response, spider)\n log.msg(spider=spider, **logkws)\n self.signals.send_catch_log(signal=signals.response_received, \\\n response=response, request=request, spider=spider)\n return response\n\n def _on_complete(_):\n slot.nextcall.schedule()\n return _\n\n dwld = self.downloader.fetch(request, spider)\n dwld.addCallbacks(_on_success)\n dwld.addBoth(_on_complete)\n return dwld\n\n @defer.inlineCallbacks\n def open_spider(self, spider, start_requests=(), close_if_idle=True):\n assert self.has_capacity(), \"No free spider slot when opening %r\" % \\\n spider.name\n log.msg(\"Spider opened\", spider=spider)\n nextcall = CallLaterOnce(self._next_request, spider)\n scheduler = self.scheduler_cls.from_crawler(self.crawler)\n start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)\n slot = Slot(start_requests, close_if_idle, nextcall, scheduler)\n self.slot = slot\n self.spider = spider\n yield scheduler.open(spider)\n yield self.scraper.open_spider(spider)\n self.crawler.stats.open_spider(spider)\n yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)\n slot.nextcall.schedule()\n\n def _spider_idle(self, spider):\n \"\"\"Called when a spider gets idle. This function is called when there\n are no remaining pages to download or schedule. It can be called\n multiple times. 
If some extension raises a DontCloseSpider exception\n (in the spider_idle signal handler) the spider is not closed until the\n next loop and this function is guaranteed to be called (at least) once\n again for this spider.\n \"\"\"\n res = self.signals.send_catch_log(signal=signals.spider_idle, \\\n spider=spider, dont_log=DontCloseSpider)\n if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \\\n for _, x in res):\n self.slot.nextcall.schedule(5)\n return\n\n if self.spider_is_idle(spider):\n self.close_spider(spider, reason='finished')\n\n def close_spider(self, spider, reason='cancelled'):\n \"\"\"Close (cancel) spider and clear all its outstanding requests\"\"\"\n\n slot = self.slot\n if slot.closing:\n return slot.closing\n log.msg(format=\"Closing spider (%(reason)s)\", reason=reason, spider=spider)\n\n dfd = slot.close()\n\n dfd.addBoth(lambda _: self.downloader.close())\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: self.scraper.close_spider(spider))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: slot.scheduler.close(reason))\n dfd.addErrback(log.err, spider=spider)\n\n # XXX: spider_stats argument was added for backwards compatibility with\n # stats collection refactoring added in 0.15. it should be removed in 0.17.\n dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(signal=signals.spider_closed, \\\n spider=spider, reason=reason, spider_stats=self.crawler.stats.get_stats()))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: log.msg(format=\"Spider closed (%(reason)s)\", reason=reason, spider=spider))\n\n dfd.addBoth(lambda _: setattr(self, 'slot', None))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: setattr(self, 'spider', None))\n dfd.addErrback(log.err, spider=spider)\n\n dfd.addBoth(lambda _: self._spider_closed_callback(spider))\n\n return dfd\n\n def _close_all_spiders(self):\n dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]\n dlist = defer.DeferredList(dfds)\n return dlist\n\n @defer.inlineCallbacks\n def _finish_stopping_engine(self):\n yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)\n self._closewait.callback(None)\n", "path": "scrapy/core/engine.py"}]} | 3,859 | 192 |
gh_patches_debug_40845 | rasdani/github-patches | git_diff | encode__starlette-2017 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Middleware stacking and recursive initialization
### Discussed in https://github.com/encode/starlette/discussions/1161
Originally posted by **euri10** on April 1, 2021
Long story short: I was writing a logging middleware whose handlers are initialized in the middleware's `__init__`, and I ended up with duplicated log output.
it seems to me the `__init__` of each middleware is recursively called because of the `build_middleware_stack` method, and I'm not sure if this is intended or a bug.
here's a test case that shows what I think is an issue:
```
class SimpleInitializableMiddleware:
    def __init__(self, app, counter: int):
        self.app = app
        self.counter = counter
        print(self.counter)


def test_middleware_stack_init():
    app = Starlette()
    app.add_middleware(SimpleInitializableMiddleware, counter=1)
    app.add_middleware(SimpleInitializableMiddleware, counter=2)
    app.add_middleware(SimpleInitializableMiddleware, counter=3)
    app.add_middleware(SimpleInitializableMiddleware, counter=4)
```
I would expect to see `1 2 3 4` as the output.
however it prints `1 1 2 1 2 3 1 2 3 4`, because each `add_middleware` call goes down the stack and reinitializes it.
what do you think, @tomchristie?
--- END ISSUE ---
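A minimal, self-contained sketch of the behaviour reported above (the `App` and `CountingMiddleware` classes below are illustrative stand-ins, not Starlette's actual code): rebuilding the stack eagerly inside every `add_middleware` call re-runs each registered middleware's `__init__`, while deferring the build until first use runs each one exactly once.

```python
# Hypothetical stand-in for the application/middleware pair in the report.
class CountingMiddleware:
    def __init__(self, app, counter: int):
        self.app = app
        self.counter = counter
        print(self.counter)  # side effect that exposes repeated initialization


class App:
    def __init__(self):
        self.user_middleware = []
        self.middleware_stack = None  # deferred: built once, on first use

    def add_middleware(self, cls, **options):
        self.user_middleware.insert(0, (cls, options))
        # An eager variant would rebuild the stack right here,
        # re-running every registered middleware's __init__ each time.

    def build_middleware_stack(self):
        app = object()  # stand-in for the innermost router
        for cls, options in reversed(self.user_middleware):
            app = cls(app=app, **options)
        return app


app = App()
for n in (1, 2, 3, 4):
    app.add_middleware(CountingMiddleware, counter=n)
app.middleware_stack = app.build_middleware_stack()  # prints 1 2 3 4, once each
```

This mirrors the lazy-build approach taken in the patch further down, where the stack is only assembled on the first `__call__`.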
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/applications.py`
Content:
```
1 import typing
2 import warnings
3
4 from starlette.datastructures import State, URLPath
5 from starlette.middleware import Middleware
6 from starlette.middleware.base import BaseHTTPMiddleware
7 from starlette.middleware.errors import ServerErrorMiddleware
8 from starlette.middleware.exceptions import ExceptionMiddleware
9 from starlette.requests import Request
10 from starlette.responses import Response
11 from starlette.routing import BaseRoute, Router
12 from starlette.types import ASGIApp, Receive, Scope, Send
13
14
15 class Starlette:
16 """
17 Creates an application instance.
18
19 **Parameters:**
20
21 * **debug** - Boolean indicating if debug tracebacks should be returned on errors.
22 * **routes** - A list of routes to serve incoming HTTP and WebSocket requests.
23 * **middleware** - A list of middleware to run for every request. A starlette
24 application will always automatically include two middleware classes.
25 `ServerErrorMiddleware` is added as the very outermost middleware, to handle
26 any uncaught errors occurring anywhere in the entire stack.
27 `ExceptionMiddleware` is added as the very innermost middleware, to deal
28 with handled exception cases occurring in the routing or endpoints.
29 * **exception_handlers** - A mapping of either integer status codes,
30 or exception class types onto callables which handle the exceptions.
31 Exception handler callables should be of the form
32 `handler(request, exc) -> response` and may be be either standard functions, or
33 async functions.
34 * **on_startup** - A list of callables to run on application startup.
35 Startup handler callables do not take any arguments, and may be be either
36 standard functions, or async functions.
37 * **on_shutdown** - A list of callables to run on application shutdown.
38 Shutdown handler callables do not take any arguments, and may be be either
39 standard functions, or async functions.
40 """
41
42 def __init__(
43 self,
44 debug: bool = False,
45 routes: typing.Optional[typing.Sequence[BaseRoute]] = None,
46 middleware: typing.Optional[typing.Sequence[Middleware]] = None,
47 exception_handlers: typing.Optional[
48 typing.Mapping[
49 typing.Any,
50 typing.Callable[
51 [Request, Exception],
52 typing.Union[Response, typing.Awaitable[Response]],
53 ],
54 ]
55 ] = None,
56 on_startup: typing.Optional[typing.Sequence[typing.Callable]] = None,
57 on_shutdown: typing.Optional[typing.Sequence[typing.Callable]] = None,
58 lifespan: typing.Optional[
59 typing.Callable[["Starlette"], typing.AsyncContextManager]
60 ] = None,
61 ) -> None:
62 # The lifespan context function is a newer style that replaces
63 # on_startup / on_shutdown handlers. Use one or the other, not both.
64 assert lifespan is None or (
65 on_startup is None and on_shutdown is None
66 ), "Use either 'lifespan' or 'on_startup'/'on_shutdown', not both."
67
68 self._debug = debug
69 self.state = State()
70 self.router = Router(
71 routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan
72 )
73 self.exception_handlers = (
74 {} if exception_handlers is None else dict(exception_handlers)
75 )
76 self.user_middleware = [] if middleware is None else list(middleware)
77 self.middleware_stack = self.build_middleware_stack()
78
79 def build_middleware_stack(self) -> ASGIApp:
80 debug = self.debug
81 error_handler = None
82 exception_handlers: typing.Dict[
83 typing.Any, typing.Callable[[Request, Exception], Response]
84 ] = {}
85
86 for key, value in self.exception_handlers.items():
87 if key in (500, Exception):
88 error_handler = value
89 else:
90 exception_handlers[key] = value
91
92 middleware = (
93 [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]
94 + self.user_middleware
95 + [
96 Middleware(
97 ExceptionMiddleware, handlers=exception_handlers, debug=debug
98 )
99 ]
100 )
101
102 app = self.router
103 for cls, options in reversed(middleware):
104 app = cls(app=app, **options)
105 return app
106
107 @property
108 def routes(self) -> typing.List[BaseRoute]:
109 return self.router.routes
110
111 @property
112 def debug(self) -> bool:
113 return self._debug
114
115 @debug.setter
116 def debug(self, value: bool) -> None:
117 self._debug = value
118 self.middleware_stack = self.build_middleware_stack()
119
120 def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:
121 return self.router.url_path_for(name, **path_params)
122
123 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
124 scope["app"] = self
125 await self.middleware_stack(scope, receive, send)
126
127 def on_event(self, event_type: str) -> typing.Callable: # pragma: nocover
128 return self.router.on_event(event_type)
129
130 def mount(
131 self, path: str, app: ASGIApp, name: typing.Optional[str] = None
132 ) -> None: # pragma: nocover
133 self.router.mount(path, app=app, name=name)
134
135 def host(
136 self, host: str, app: ASGIApp, name: typing.Optional[str] = None
137 ) -> None: # pragma: no cover
138 self.router.host(host, app=app, name=name)
139
140 def add_middleware(
141 self, middleware_class: type, **options: typing.Any
142 ) -> None: # pragma: no cover
143 self.user_middleware.insert(0, Middleware(middleware_class, **options))
144 self.middleware_stack = self.build_middleware_stack()
145
146 def add_exception_handler(
147 self,
148 exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],
149 handler: typing.Callable,
150 ) -> None: # pragma: no cover
151 self.exception_handlers[exc_class_or_status_code] = handler
152 self.middleware_stack = self.build_middleware_stack()
153
154 def add_event_handler(
155 self, event_type: str, func: typing.Callable
156 ) -> None: # pragma: no cover
157 self.router.add_event_handler(event_type, func)
158
159 def add_route(
160 self,
161 path: str,
162 route: typing.Callable,
163 methods: typing.Optional[typing.List[str]] = None,
164 name: typing.Optional[str] = None,
165 include_in_schema: bool = True,
166 ) -> None: # pragma: no cover
167 self.router.add_route(
168 path, route, methods=methods, name=name, include_in_schema=include_in_schema
169 )
170
171 def add_websocket_route(
172 self, path: str, route: typing.Callable, name: typing.Optional[str] = None
173 ) -> None: # pragma: no cover
174 self.router.add_websocket_route(path, route, name=name)
175
176 def exception_handler(
177 self, exc_class_or_status_code: typing.Union[int, typing.Type[Exception]]
178 ) -> typing.Callable:
179 warnings.warn(
180 "The `exception_handler` decorator is deprecated, and will be removed in version 1.0.0. " # noqa: E501
181 "Refer to https://www.starlette.io/exceptions/ for the recommended approach.", # noqa: E501
182 DeprecationWarning,
183 )
184
185 def decorator(func: typing.Callable) -> typing.Callable:
186 self.add_exception_handler(exc_class_or_status_code, func)
187 return func
188
189 return decorator
190
191 def route(
192 self,
193 path: str,
194 methods: typing.Optional[typing.List[str]] = None,
195 name: typing.Optional[str] = None,
196 include_in_schema: bool = True,
197 ) -> typing.Callable:
198 """
199 We no longer document this decorator style API, and its usage is discouraged.
200 Instead you should use the following approach:
201
202 >>> routes = [Route(path, endpoint=...), ...]
203 >>> app = Starlette(routes=routes)
204 """
205 warnings.warn(
206 "The `route` decorator is deprecated, and will be removed in version 1.0.0. " # noqa: E501
207 "Refer to https://www.starlette.io/routing/ for the recommended approach.", # noqa: E501
208 DeprecationWarning,
209 )
210
211 def decorator(func: typing.Callable) -> typing.Callable:
212 self.router.add_route(
213 path,
214 func,
215 methods=methods,
216 name=name,
217 include_in_schema=include_in_schema,
218 )
219 return func
220
221 return decorator
222
223 def websocket_route(
224 self, path: str, name: typing.Optional[str] = None
225 ) -> typing.Callable:
226 """
227 We no longer document this decorator style API, and its usage is discouraged.
228 Instead you should use the following approach:
229
230 >>> routes = [WebSocketRoute(path, endpoint=...), ...]
231 >>> app = Starlette(routes=routes)
232 """
233 warnings.warn(
234 "The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. " # noqa: E501
235 "Refer to https://www.starlette.io/routing/#websocket-routing for the recommended approach.", # noqa: E501
236 DeprecationWarning,
237 )
238
239 def decorator(func: typing.Callable) -> typing.Callable:
240 self.router.add_websocket_route(path, func, name=name)
241 return func
242
243 return decorator
244
245 def middleware(self, middleware_type: str) -> typing.Callable:
246 """
247 We no longer document this decorator style API, and its usage is discouraged.
248 Instead you should use the following approach:
249
250 >>> middleware = [Middleware(...), ...]
251 >>> app = Starlette(middleware=middleware)
252 """
253 warnings.warn(
254 "The `middleware` decorator is deprecated, and will be removed in version 1.0.0. " # noqa: E501
255 "Refer to https://www.starlette.io/middleware/#using-middleware for recommended approach.", # noqa: E501
256 DeprecationWarning,
257 )
258 assert (
259 middleware_type == "http"
260 ), 'Currently only middleware("http") is supported.'
261
262 def decorator(func: typing.Callable) -> typing.Callable:
263 self.add_middleware(BaseHTTPMiddleware, dispatch=func)
264 return func
265
266 return decorator
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/applications.py b/starlette/applications.py
--- a/starlette/applications.py
+++ b/starlette/applications.py
@@ -65,7 +65,7 @@
on_startup is None and on_shutdown is None
), "Use either 'lifespan' or 'on_startup'/'on_shutdown', not both."
- self._debug = debug
+ self.debug = debug
self.state = State()
self.router = Router(
routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan
@@ -74,7 +74,7 @@
{} if exception_handlers is None else dict(exception_handlers)
)
self.user_middleware = [] if middleware is None else list(middleware)
- self.middleware_stack = self.build_middleware_stack()
+ self.middleware_stack: typing.Optional[ASGIApp] = None
def build_middleware_stack(self) -> ASGIApp:
debug = self.debug
@@ -108,20 +108,13 @@
def routes(self) -> typing.List[BaseRoute]:
return self.router.routes
- @property
- def debug(self) -> bool:
- return self._debug
-
- @debug.setter
- def debug(self, value: bool) -> None:
- self._debug = value
- self.middleware_stack = self.build_middleware_stack()
-
def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:
return self.router.url_path_for(name, **path_params)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
scope["app"] = self
+ if self.middleware_stack is None:
+ self.middleware_stack = self.build_middleware_stack()
await self.middleware_stack(scope, receive, send)
def on_event(self, event_type: str) -> typing.Callable: # pragma: nocover
@@ -137,11 +130,10 @@
) -> None: # pragma: no cover
self.router.host(host, app=app, name=name)
- def add_middleware(
- self, middleware_class: type, **options: typing.Any
- ) -> None: # pragma: no cover
+ def add_middleware(self, middleware_class: type, **options: typing.Any) -> None:
+ if self.middleware_stack is not None: # pragma: no cover
+ raise RuntimeError("Cannot add middleware after an application has started")
self.user_middleware.insert(0, Middleware(middleware_class, **options))
- self.middleware_stack = self.build_middleware_stack()
def add_exception_handler(
self,
@@ -149,7 +141,6 @@
handler: typing.Callable,
) -> None: # pragma: no cover
self.exception_handlers[exc_class_or_status_code] = handler
- self.middleware_stack = self.build_middleware_stack()
def add_event_handler(
self, event_type: str, func: typing.Callable
| {"golden_diff": "diff --git a/starlette/applications.py b/starlette/applications.py\n--- a/starlette/applications.py\n+++ b/starlette/applications.py\n@@ -65,7 +65,7 @@\n on_startup is None and on_shutdown is None\n ), \"Use either 'lifespan' or 'on_startup'/'on_shutdown', not both.\"\n \n- self._debug = debug\n+ self.debug = debug\n self.state = State()\n self.router = Router(\n routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan\n@@ -74,7 +74,7 @@\n {} if exception_handlers is None else dict(exception_handlers)\n )\n self.user_middleware = [] if middleware is None else list(middleware)\n- self.middleware_stack = self.build_middleware_stack()\n+ self.middleware_stack: typing.Optional[ASGIApp] = None\n \n def build_middleware_stack(self) -> ASGIApp:\n debug = self.debug\n@@ -108,20 +108,13 @@\n def routes(self) -> typing.List[BaseRoute]:\n return self.router.routes\n \n- @property\n- def debug(self) -> bool:\n- return self._debug\n-\n- @debug.setter\n- def debug(self, value: bool) -> None:\n- self._debug = value\n- self.middleware_stack = self.build_middleware_stack()\n-\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n return self.router.url_path_for(name, **path_params)\n \n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n scope[\"app\"] = self\n+ if self.middleware_stack is None:\n+ self.middleware_stack = self.build_middleware_stack()\n await self.middleware_stack(scope, receive, send)\n \n def on_event(self, event_type: str) -> typing.Callable: # pragma: nocover\n@@ -137,11 +130,10 @@\n ) -> None: # pragma: no cover\n self.router.host(host, app=app, name=name)\n \n- def add_middleware(\n- self, middleware_class: type, **options: typing.Any\n- ) -> None: # pragma: no cover\n+ def add_middleware(self, middleware_class: type, **options: typing.Any) -> None:\n+ if self.middleware_stack is not None: # pragma: no cover\n+ raise RuntimeError(\"Cannot add middleware after an application has started\")\n self.user_middleware.insert(0, Middleware(middleware_class, **options))\n- self.middleware_stack = self.build_middleware_stack()\n \n def add_exception_handler(\n self,\n@@ -149,7 +141,6 @@\n handler: typing.Callable,\n ) -> None: # pragma: no cover\n self.exception_handlers[exc_class_or_status_code] = handler\n- self.middleware_stack = self.build_middleware_stack()\n \n def add_event_handler(\n self, event_type: str, func: typing.Callable\n", "issue": "Middleware stacking and recursive initialization\n### Discussed in https://github.com/encode/starlette/discussions/1161\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **euri10** April 1, 2021</sup>\r\nlong story short I was writing a logging middleware whose handlers are initialized in the `__init__` of the middleware and ended up with multiple logs.\r\n\r\nit seems to me the `__init__` of the middlewarea are recursively called because of the `build_middleware_stack` method and I'm not sure if this is intended or a bug.\r\n\r\nhere's a test case that shows what I think is an issue:\r\n\r\n```\r\nclass SimpleInitializableMiddleware:\r\n def __init__(self, app, counter: int):\r\n self.app = app\r\n self.counter = counter\r\n print(self.counter)\r\n\r\n\r\ndef test_middleware_stack_init():\r\n app = Starlette()\r\n app.add_middleware(SimpleInitializableMiddleware, counter=1)\r\n app.add_middleware(SimpleInitializableMiddleware, counter=2)\r\n app.add_middleware(SimpleInitializableMiddleware, counter=3)\r\n 
app.add_middleware(SimpleInitializableMiddleware, counter=4)\r\n```\r\n\r\nI would expect to see `1 2 3 4` as the output.\r\nhowever it prints `1 1 2 1 2 3 1 2 3 4` because for each add_middlware call it goes down the stack and reinitialize it.\r\n\r\nwhat you think @tomchristie ?</div>\n", "before_files": [{"content": "import typing\nimport warnings\n\nfrom starlette.datastructures import State, URLPath\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.base import BaseHTTPMiddleware\nfrom starlette.middleware.errors import ServerErrorMiddleware\nfrom starlette.middleware.exceptions import ExceptionMiddleware\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import BaseRoute, Router\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\n\nclass Starlette:\n \"\"\"\n Creates an application instance.\n\n **Parameters:**\n\n * **debug** - Boolean indicating if debug tracebacks should be returned on errors.\n * **routes** - A list of routes to serve incoming HTTP and WebSocket requests.\n * **middleware** - A list of middleware to run for every request. A starlette\n application will always automatically include two middleware classes.\n `ServerErrorMiddleware` is added as the very outermost middleware, to handle\n any uncaught errors occurring anywhere in the entire stack.\n `ExceptionMiddleware` is added as the very innermost middleware, to deal\n with handled exception cases occurring in the routing or endpoints.\n * **exception_handlers** - A mapping of either integer status codes,\n or exception class types onto callables which handle the exceptions.\n Exception handler callables should be of the form\n `handler(request, exc) -> response` and may be be either standard functions, or\n async functions.\n * **on_startup** - A list of callables to run on application startup.\n Startup handler callables do not take any arguments, and may be be either\n standard functions, or async functions.\n * **on_shutdown** - A list of callables to run on application shutdown.\n Shutdown handler callables do not take any arguments, and may be be either\n standard functions, or async functions.\n \"\"\"\n\n def __init__(\n self,\n debug: bool = False,\n routes: typing.Optional[typing.Sequence[BaseRoute]] = None,\n middleware: typing.Optional[typing.Sequence[Middleware]] = None,\n exception_handlers: typing.Optional[\n typing.Mapping[\n typing.Any,\n typing.Callable[\n [Request, Exception],\n typing.Union[Response, typing.Awaitable[Response]],\n ],\n ]\n ] = None,\n on_startup: typing.Optional[typing.Sequence[typing.Callable]] = None,\n on_shutdown: typing.Optional[typing.Sequence[typing.Callable]] = None,\n lifespan: typing.Optional[\n typing.Callable[[\"Starlette\"], typing.AsyncContextManager]\n ] = None,\n ) -> None:\n # The lifespan context function is a newer style that replaces\n # on_startup / on_shutdown handlers. 
Use one or the other, not both.\n assert lifespan is None or (\n on_startup is None and on_shutdown is None\n ), \"Use either 'lifespan' or 'on_startup'/'on_shutdown', not both.\"\n\n self._debug = debug\n self.state = State()\n self.router = Router(\n routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan\n )\n self.exception_handlers = (\n {} if exception_handlers is None else dict(exception_handlers)\n )\n self.user_middleware = [] if middleware is None else list(middleware)\n self.middleware_stack = self.build_middleware_stack()\n\n def build_middleware_stack(self) -> ASGIApp:\n debug = self.debug\n error_handler = None\n exception_handlers: typing.Dict[\n typing.Any, typing.Callable[[Request, Exception], Response]\n ] = {}\n\n for key, value in self.exception_handlers.items():\n if key in (500, Exception):\n error_handler = value\n else:\n exception_handlers[key] = value\n\n middleware = (\n [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]\n + self.user_middleware\n + [\n Middleware(\n ExceptionMiddleware, handlers=exception_handlers, debug=debug\n )\n ]\n )\n\n app = self.router\n for cls, options in reversed(middleware):\n app = cls(app=app, **options)\n return app\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return self.router.routes\n\n @property\n def debug(self) -> bool:\n return self._debug\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self._debug = value\n self.middleware_stack = self.build_middleware_stack()\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n return self.router.url_path_for(name, **path_params)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n scope[\"app\"] = self\n await self.middleware_stack(scope, receive, send)\n\n def on_event(self, event_type: str) -> typing.Callable: # pragma: nocover\n return self.router.on_event(event_type)\n\n def mount(\n self, path: str, app: ASGIApp, name: typing.Optional[str] = None\n ) -> None: # pragma: nocover\n self.router.mount(path, app=app, name=name)\n\n def host(\n self, host: str, app: ASGIApp, name: typing.Optional[str] = None\n ) -> None: # pragma: no cover\n self.router.host(host, app=app, name=name)\n\n def add_middleware(\n self, middleware_class: type, **options: typing.Any\n ) -> None: # pragma: no cover\n self.user_middleware.insert(0, Middleware(middleware_class, **options))\n self.middleware_stack = self.build_middleware_stack()\n\n def add_exception_handler(\n self,\n exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],\n handler: typing.Callable,\n ) -> None: # pragma: no cover\n self.exception_handlers[exc_class_or_status_code] = handler\n self.middleware_stack = self.build_middleware_stack()\n\n def add_event_handler(\n self, event_type: str, func: typing.Callable\n ) -> None: # pragma: no cover\n self.router.add_event_handler(event_type, func)\n\n def add_route(\n self,\n path: str,\n route: typing.Callable,\n methods: typing.Optional[typing.List[str]] = None,\n name: typing.Optional[str] = None,\n include_in_schema: bool = True,\n ) -> None: # pragma: no cover\n self.router.add_route(\n path, route, methods=methods, name=name, include_in_schema=include_in_schema\n )\n\n def add_websocket_route(\n self, path: str, route: typing.Callable, name: typing.Optional[str] = None\n ) -> None: # pragma: no cover\n self.router.add_websocket_route(path, route, name=name)\n\n def exception_handler(\n self, exc_class_or_status_code: typing.Union[int, 
typing.Type[Exception]]\n ) -> typing.Callable:\n warnings.warn(\n \"The `exception_handler` decorator is deprecated, and will be removed in version 1.0.0. \" # noqa: E501\n \"Refer to https://www.starlette.io/exceptions/ for the recommended approach.\", # noqa: E501\n DeprecationWarning,\n )\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_exception_handler(exc_class_or_status_code, func)\n return func\n\n return decorator\n\n def route(\n self,\n path: str,\n methods: typing.Optional[typing.List[str]] = None,\n name: typing.Optional[str] = None,\n include_in_schema: bool = True,\n ) -> typing.Callable:\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n >>> routes = [Route(path, endpoint=...), ...]\n >>> app = Starlette(routes=routes)\n \"\"\"\n warnings.warn(\n \"The `route` decorator is deprecated, and will be removed in version 1.0.0. \" # noqa: E501\n \"Refer to https://www.starlette.io/routing/ for the recommended approach.\", # noqa: E501\n DeprecationWarning,\n )\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.router.add_route(\n path,\n func,\n methods=methods,\n name=name,\n include_in_schema=include_in_schema,\n )\n return func\n\n return decorator\n\n def websocket_route(\n self, path: str, name: typing.Optional[str] = None\n ) -> typing.Callable:\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n >>> routes = [WebSocketRoute(path, endpoint=...), ...]\n >>> app = Starlette(routes=routes)\n \"\"\"\n warnings.warn(\n \"The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. \" # noqa: E501\n \"Refer to https://www.starlette.io/routing/#websocket-routing for the recommended approach.\", # noqa: E501\n DeprecationWarning,\n )\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.router.add_websocket_route(path, func, name=name)\n return func\n\n return decorator\n\n def middleware(self, middleware_type: str) -> typing.Callable:\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n >>> middleware = [Middleware(...), ...]\n >>> app = Starlette(middleware=middleware)\n \"\"\"\n warnings.warn(\n \"The `middleware` decorator is deprecated, and will be removed in version 1.0.0. 
\" # noqa: E501\n \"Refer to https://www.starlette.io/middleware/#using-middleware for recommended approach.\", # noqa: E501\n DeprecationWarning,\n )\n assert (\n middleware_type == \"http\"\n ), 'Currently only middleware(\"http\") is supported.'\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_middleware(BaseHTTPMiddleware, dispatch=func)\n return func\n\n return decorator\n", "path": "starlette/applications.py"}], "after_files": [{"content": "import typing\nimport warnings\n\nfrom starlette.datastructures import State, URLPath\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.base import BaseHTTPMiddleware\nfrom starlette.middleware.errors import ServerErrorMiddleware\nfrom starlette.middleware.exceptions import ExceptionMiddleware\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import BaseRoute, Router\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\n\nclass Starlette:\n \"\"\"\n Creates an application instance.\n\n **Parameters:**\n\n * **debug** - Boolean indicating if debug tracebacks should be returned on errors.\n * **routes** - A list of routes to serve incoming HTTP and WebSocket requests.\n * **middleware** - A list of middleware to run for every request. A starlette\n application will always automatically include two middleware classes.\n `ServerErrorMiddleware` is added as the very outermost middleware, to handle\n any uncaught errors occurring anywhere in the entire stack.\n `ExceptionMiddleware` is added as the very innermost middleware, to deal\n with handled exception cases occurring in the routing or endpoints.\n * **exception_handlers** - A mapping of either integer status codes,\n or exception class types onto callables which handle the exceptions.\n Exception handler callables should be of the form\n `handler(request, exc) -> response` and may be be either standard functions, or\n async functions.\n * **on_startup** - A list of callables to run on application startup.\n Startup handler callables do not take any arguments, and may be be either\n standard functions, or async functions.\n * **on_shutdown** - A list of callables to run on application shutdown.\n Shutdown handler callables do not take any arguments, and may be be either\n standard functions, or async functions.\n \"\"\"\n\n def __init__(\n self,\n debug: bool = False,\n routes: typing.Optional[typing.Sequence[BaseRoute]] = None,\n middleware: typing.Optional[typing.Sequence[Middleware]] = None,\n exception_handlers: typing.Optional[\n typing.Mapping[\n typing.Any,\n typing.Callable[\n [Request, Exception],\n typing.Union[Response, typing.Awaitable[Response]],\n ],\n ]\n ] = None,\n on_startup: typing.Optional[typing.Sequence[typing.Callable]] = None,\n on_shutdown: typing.Optional[typing.Sequence[typing.Callable]] = None,\n lifespan: typing.Optional[\n typing.Callable[[\"Starlette\"], typing.AsyncContextManager]\n ] = None,\n ) -> None:\n # The lifespan context function is a newer style that replaces\n # on_startup / on_shutdown handlers. 
Use one or the other, not both.\n assert lifespan is None or (\n on_startup is None and on_shutdown is None\n ), \"Use either 'lifespan' or 'on_startup'/'on_shutdown', not both.\"\n\n self.debug = debug\n self.state = State()\n self.router = Router(\n routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan\n )\n self.exception_handlers = (\n {} if exception_handlers is None else dict(exception_handlers)\n )\n self.user_middleware = [] if middleware is None else list(middleware)\n self.middleware_stack: typing.Optional[ASGIApp] = None\n\n def build_middleware_stack(self) -> ASGIApp:\n debug = self.debug\n error_handler = None\n exception_handlers: typing.Dict[\n typing.Any, typing.Callable[[Request, Exception], Response]\n ] = {}\n\n for key, value in self.exception_handlers.items():\n if key in (500, Exception):\n error_handler = value\n else:\n exception_handlers[key] = value\n\n middleware = (\n [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]\n + self.user_middleware\n + [\n Middleware(\n ExceptionMiddleware, handlers=exception_handlers, debug=debug\n )\n ]\n )\n\n app = self.router\n for cls, options in reversed(middleware):\n app = cls(app=app, **options)\n return app\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return self.router.routes\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n return self.router.url_path_for(name, **path_params)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n scope[\"app\"] = self\n if self.middleware_stack is None:\n self.middleware_stack = self.build_middleware_stack()\n await self.middleware_stack(scope, receive, send)\n\n def on_event(self, event_type: str) -> typing.Callable: # pragma: nocover\n return self.router.on_event(event_type)\n\n def mount(\n self, path: str, app: ASGIApp, name: typing.Optional[str] = None\n ) -> None: # pragma: nocover\n self.router.mount(path, app=app, name=name)\n\n def host(\n self, host: str, app: ASGIApp, name: typing.Optional[str] = None\n ) -> None: # pragma: no cover\n self.router.host(host, app=app, name=name)\n\n def add_middleware(self, middleware_class: type, **options: typing.Any) -> None:\n if self.middleware_stack is not None: # pragma: no cover\n raise RuntimeError(\"Cannot add middleware after an application has started\")\n self.user_middleware.insert(0, Middleware(middleware_class, **options))\n\n def add_exception_handler(\n self,\n exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],\n handler: typing.Callable,\n ) -> None: # pragma: no cover\n self.exception_handlers[exc_class_or_status_code] = handler\n\n def add_event_handler(\n self, event_type: str, func: typing.Callable\n ) -> None: # pragma: no cover\n self.router.add_event_handler(event_type, func)\n\n def add_route(\n self,\n path: str,\n route: typing.Callable,\n methods: typing.Optional[typing.List[str]] = None,\n name: typing.Optional[str] = None,\n include_in_schema: bool = True,\n ) -> None: # pragma: no cover\n self.router.add_route(\n path, route, methods=methods, name=name, include_in_schema=include_in_schema\n )\n\n def add_websocket_route(\n self, path: str, route: typing.Callable, name: typing.Optional[str] = None\n ) -> None: # pragma: no cover\n self.router.add_websocket_route(path, route, name=name)\n\n def exception_handler(\n self, exc_class_or_status_code: typing.Union[int, typing.Type[Exception]]\n ) -> typing.Callable:\n warnings.warn(\n \"The `exception_handler` decorator is deprecated, and 
will be removed in version 1.0.0. \" # noqa: E501\n \"Refer to https://www.starlette.io/exceptions/ for the recommended approach.\", # noqa: E501\n DeprecationWarning,\n )\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_exception_handler(exc_class_or_status_code, func)\n return func\n\n return decorator\n\n def route(\n self,\n path: str,\n methods: typing.Optional[typing.List[str]] = None,\n name: typing.Optional[str] = None,\n include_in_schema: bool = True,\n ) -> typing.Callable:\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n >>> routes = [Route(path, endpoint=...), ...]\n >>> app = Starlette(routes=routes)\n \"\"\"\n warnings.warn(\n \"The `route` decorator is deprecated, and will be removed in version 1.0.0. \" # noqa: E501\n \"Refer to https://www.starlette.io/routing/ for the recommended approach.\", # noqa: E501\n DeprecationWarning,\n )\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.router.add_route(\n path,\n func,\n methods=methods,\n name=name,\n include_in_schema=include_in_schema,\n )\n return func\n\n return decorator\n\n def websocket_route(\n self, path: str, name: typing.Optional[str] = None\n ) -> typing.Callable:\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n >>> routes = [WebSocketRoute(path, endpoint=...), ...]\n >>> app = Starlette(routes=routes)\n \"\"\"\n warnings.warn(\n \"The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. \" # noqa: E501\n \"Refer to https://www.starlette.io/routing/#websocket-routing for the recommended approach.\", # noqa: E501\n DeprecationWarning,\n )\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.router.add_websocket_route(path, func, name=name)\n return func\n\n return decorator\n\n def middleware(self, middleware_type: str) -> typing.Callable:\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n >>> middleware = [Middleware(...), ...]\n >>> app = Starlette(middleware=middleware)\n \"\"\"\n warnings.warn(\n \"The `middleware` decorator is deprecated, and will be removed in version 1.0.0. \" # noqa: E501\n \"Refer to https://www.starlette.io/middleware/#using-middleware for recommended approach.\", # noqa: E501\n DeprecationWarning,\n )\n assert (\n middleware_type == \"http\"\n ), 'Currently only middleware(\"http\") is supported.'\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_middleware(BaseHTTPMiddleware, dispatch=func)\n return func\n\n return decorator\n", "path": "starlette/applications.py"}]} | 3,557 | 682 |
gh_patches_debug_13725 | rasdani/github-patches | git_diff | boto__boto-1812 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
setup.py and cheeseshop releases do not list dependencies
The boto release packages do not enumerate dependencies in setup.py, so they are not installed if you easy_install or pip install boto. As it is, the cloudsearch module is not functional out of the box due to missing dependencies after install (it requires requests in some version range).
I notice that you have a requirements.txt, which is useful, but is there any reason the set needed for deployment is not also listed in setup.py in the "install_requires" / "tests_require" sections, as most packages do?
--- END ISSUE ---
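A hedged sketch of what the report is asking for: declaring runtime dependencies in `setup.py` so that easy_install/pip pulls them in automatically. The helper, file names, and version placeholder below are assumptions for illustration, not boto's actual packaging metadata.

```python
# Hypothetical setup.py fragment; metadata and file names are illustrative only.
import os
from setuptools import setup, find_packages


def read_requirements(path):
    """Return non-empty, non-comment lines from a pip-style requirements file."""
    with open(path) as handle:
        return [line.strip() for line in handle
                if line.strip() and not line.startswith("#")]


HERE = os.path.dirname(os.path.abspath(__file__))

setup(
    name="boto",
    version="0.0.0",  # placeholder; the real version is defined elsewhere in the project
    packages=find_packages(),
    # a pinned range for requests (and any other runtime deps) would land here
    install_requires=read_requirements(os.path.join(HERE, "requirements.txt")),
)
```

Whether install_requires should mirror requirements.txt verbatim or list a narrower runtime subset is a maintainer decision; the sketch only illustrates the mechanism.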
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `boto/cloudsearch/document.py`
Content:
```
1 # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
2 # Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
3 # All Rights Reserved
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining a
6 # copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish, dis-
9 # tribute, sublicense, and/or sell copies of the Software, and to permit
10 # persons to whom the Software is furnished to do so, subject to the fol-
11 # lowing conditions:
12 #
13 # The above copyright notice and this permission notice shall be included
14 # in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
18 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
19 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 # IN THE SOFTWARE.
23 #
24
25 import boto.exception
26 from boto.compat import json
27 import requests
28 import boto
29
30 class SearchServiceException(Exception):
31 pass
32
33
34 class CommitMismatchError(Exception):
35 pass
36
37 class EncodingError(Exception):
38 """
39 Content sent for Cloud Search indexing was incorrectly encoded.
40
41 This usually happens when a document is marked as unicode but non-unicode
42 characters are present.
43 """
44 pass
45
46 class ContentTooLongError(Exception):
47 """
48 Content sent for Cloud Search indexing was too long
49
50 This will usually happen when documents queued for indexing add up to more
51 than the limit allowed per upload batch (5MB)
52
53 """
54 pass
55
56 class DocumentServiceConnection(object):
57 """
58 A CloudSearch document service.
59
60 The DocumentServiceConection is used to add, remove and update documents in
61 CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document Format).
62
63 To generate an appropriate SDF, use :func:`add` to add or update documents,
64 as well as :func:`delete` to remove documents.
65
66 Once the set of documents is ready to be index, use :func:`commit` to send the
67 commands to CloudSearch.
68
69 If there are a lot of documents to index, it may be preferable to split the
70 generation of SDF data and the actual uploading into CloudSearch. Retrieve
71 the current SDF with :func:`get_sdf`. If this file is the uploaded into S3,
72 it can be retrieved back afterwards for upload into CloudSearch using
73 :func:`add_sdf_from_s3`.
74
75 The SDF is not cleared after a :func:`commit`. If you wish to continue
76 using the DocumentServiceConnection for another batch upload of commands,
77 you will need to :func:`clear_sdf` first to stop the previous batch of
78 commands from being uploaded again.
79
80 """
81
82 def __init__(self, domain=None, endpoint=None):
83 self.domain = domain
84 self.endpoint = endpoint
85 if not self.endpoint:
86 self.endpoint = domain.doc_service_endpoint
87 self.documents_batch = []
88 self._sdf = None
89
90 def add(self, _id, version, fields, lang='en'):
91 """
92 Add a document to be processed by the DocumentService
93
94 The document will not actually be added until :func:`commit` is called
95
96 :type _id: string
97 :param _id: A unique ID used to refer to this document.
98
99 :type version: int
100 :param version: Version of the document being indexed. If a file is
101 being reindexed, the version should be higher than the existing one
102 in CloudSearch.
103
104 :type fields: dict
105 :param fields: A dictionary of key-value pairs to be uploaded .
106
107 :type lang: string
108 :param lang: The language code the data is in. Only 'en' is currently
109 supported
110 """
111
112 d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,
113 'fields': fields}
114 self.documents_batch.append(d)
115
116 def delete(self, _id, version):
117 """
118 Schedule a document to be removed from the CloudSearch service
119
120 The document will not actually be scheduled for removal until :func:`commit` is called
121
122 :type _id: string
123 :param _id: The unique ID of this document.
124
125 :type version: int
126 :param version: Version of the document to remove. The delete will only
127 occur if this version number is higher than the version currently
128 in the index.
129 """
130
131 d = {'type': 'delete', 'id': _id, 'version': version}
132 self.documents_batch.append(d)
133
134 def get_sdf(self):
135 """
136 Generate the working set of documents in Search Data Format (SDF)
137
138 :rtype: string
139 :returns: JSON-formatted string of the documents in SDF
140 """
141
142 return self._sdf if self._sdf else json.dumps(self.documents_batch)
143
144 def clear_sdf(self):
145 """
146 Clear the working documents from this DocumentServiceConnection
147
148 This should be used after :func:`commit` if the connection will be reused
149 for another set of documents.
150 """
151
152 self._sdf = None
153 self.documents_batch = []
154
155 def add_sdf_from_s3(self, key_obj):
156 """
157 Load an SDF from S3
158
159 Using this method will result in documents added through
160 :func:`add` and :func:`delete` being ignored.
161
162 :type key_obj: :class:`boto.s3.key.Key`
163 :param key_obj: An S3 key which contains an SDF
164 """
165 #@todo:: (lucas) would be nice if this could just take an s3://uri..."
166
167 self._sdf = key_obj.get_contents_as_string()
168
169 def commit(self):
170 """
171 Actually send an SDF to CloudSearch for processing
172
173 If an SDF file has been explicitly loaded it will be used. Otherwise,
174 documents added through :func:`add` and :func:`delete` will be used.
175
176 :rtype: :class:`CommitResponse`
177 :returns: A summary of documents added and deleted
178 """
179
180 sdf = self.get_sdf()
181
182 if ': null' in sdf:
183 boto.log.error('null value in sdf detected. This will probably raise '
184 '500 error.')
185 index = sdf.index(': null')
186 boto.log.error(sdf[index - 100:index + 100])
187
188 url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)
189
190 # Keep-alive is automatic in a post-1.0 requests world.
191 session = requests.Session()
192 adapter = requests.adapters.HTTPAdapter(
193 pool_connections=20,
194 pool_maxsize=50
195 )
196 # Now kludge in the right number of retries.
197 # Once we're requiring ``requests>=1.2.1``, this can become an
198 # initialization parameter above.
199 adapter.max_retries = 5
200 session.mount('http://', adapter)
201 session.mount('https://', adapter)
202 r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
203
204 return CommitResponse(r, self, sdf)
205
206
207 class CommitResponse(object):
208 """Wrapper for response to Cloudsearch document batch commit.
209
210 :type response: :class:`requests.models.Response`
211 :param response: Response from Cloudsearch /documents/batch API
212
213 :type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`
214 :param doc_service: Object containing the documents posted and methods to
215 retry
216
217 :raises: :class:`boto.exception.BotoServerError`
218 :raises: :class:`boto.cloudsearch.document.SearchServiceException`
219 :raises: :class:`boto.cloudsearch.document.EncodingError`
220 :raises: :class:`boto.cloudsearch.document.ContentTooLongError`
221 """
222 def __init__(self, response, doc_service, sdf):
223 self.response = response
224 self.doc_service = doc_service
225 self.sdf = sdf
226
227 try:
228 self.content = json.loads(response.content)
229 except:
230 boto.log.error('Error indexing documents.\nResponse Content:\n{0}\n\n'
231 'SDF:\n{1}'.format(response.content, self.sdf))
232 raise boto.exception.BotoServerError(self.response.status_code, '',
233 body=response.content)
234
235 self.status = self.content['status']
236 if self.status == 'error':
237 self.errors = [e.get('message') for e in self.content.get('errors',
238 [])]
239 for e in self.errors:
240 if "Illegal Unicode character" in e:
241 raise EncodingError("Illegal Unicode character in document")
242 elif e == "The Content-Length is too long":
243 raise ContentTooLongError("Content was too long")
244 else:
245 self.errors = []
246
247 self.adds = self.content['adds']
248 self.deletes = self.content['deletes']
249 self._check_num_ops('add', self.adds)
250 self._check_num_ops('delete', self.deletes)
251
252 def _check_num_ops(self, type_, response_num):
253 """Raise exception if number of ops in response doesn't match commit
254
255 :type type_: str
256 :param type_: Type of commit operation: 'add' or 'delete'
257
258 :type response_num: int
259 :param response_num: Number of adds or deletes in the response.
260
261 :raises: :class:`boto.cloudsearch.document.CommitMismatchError`
262 """
263 commit_num = len([d for d in self.doc_service.documents_batch
264 if d['type'] == type_])
265
266 if response_num != commit_num:
267 raise CommitMismatchError(
268 'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\
269 .format(type_, commit_num, response_num))
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/boto/cloudsearch/document.py b/boto/cloudsearch/document.py
--- a/boto/cloudsearch/document.py
+++ b/boto/cloudsearch/document.py
@@ -191,12 +191,9 @@
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=20,
- pool_maxsize=50
+ pool_maxsize=50,
+ max_retries=5
)
- # Now kludge in the right number of retries.
- # Once we're requiring ``requests>=1.2.1``, this can become an
- # initialization parameter above.
- adapter.max_retries = 5
session.mount('http://', adapter)
session.mount('https://', adapter)
r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
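As a standalone illustration of where the patch above ends up (assuming `requests>=1.2.1`, whose `HTTPAdapter` accepts `max_retries` as a constructor argument), the session setup reads roughly like this:

```python
import requests

# Sketch only: configure retries at construction time instead of
# patching the attribute onto the adapter afterwards.
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
    pool_connections=20,
    pool_maxsize=50,
    max_retries=5,
)
session.mount('http://', adapter)
session.mount('https://', adapter)
```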
| {"golden_diff": "diff --git a/boto/cloudsearch/document.py b/boto/cloudsearch/document.py\n--- a/boto/cloudsearch/document.py\n+++ b/boto/cloudsearch/document.py\n@@ -191,12 +191,9 @@\n session = requests.Session()\n adapter = requests.adapters.HTTPAdapter(\n pool_connections=20,\n- pool_maxsize=50\n+ pool_maxsize=50,\n+ max_retries=5\n )\n- # Now kludge in the right number of retries.\n- # Once we're requiring ``requests>=1.2.1``, this can become an\n- # initialization parameter above.\n- adapter.max_retries = 5\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})\n", "issue": "setup.py and cheeseshop releases do not list dependencies \nThe boto release packages do not enumerate dependencies in setup.py, so they are not installed if you easy_install or pip install boto. As it is, the cloudsearch module is not functional out-of-the-box due to missing dependencies after install (requires requests in some version range)\n\nI notice that you have a requirements.txt, which is useful, but is there any reason the set needed for deployment is not also listed setup.py in the \"install_requires\" / \"tests_require\" sections as most packages do? \n\n", "before_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.\n# All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\nimport boto.exception\nfrom boto.compat import json\nimport requests\nimport boto\n\nclass SearchServiceException(Exception):\n pass\n\n\nclass CommitMismatchError(Exception):\n pass\n\nclass EncodingError(Exception):\n \"\"\"\n Content sent for Cloud Search indexing was incorrectly encoded.\n\n This usually happens when a document is marked as unicode but non-unicode\n characters are present.\n \"\"\"\n pass\n\nclass ContentTooLongError(Exception):\n \"\"\"\n Content sent for Cloud Search indexing was too long\n\n This will usually happen when documents queued for indexing add up to more\n than the limit allowed per upload batch (5MB)\n\n \"\"\"\n pass\n\nclass DocumentServiceConnection(object):\n \"\"\"\n A CloudSearch document service.\n\n The DocumentServiceConection is used to add, remove and update documents in\n CloudSearch. 
Commands are uploaded to CloudSearch in SDF (Search Document Format).\n\n To generate an appropriate SDF, use :func:`add` to add or update documents,\n as well as :func:`delete` to remove documents.\n\n Once the set of documents is ready to be index, use :func:`commit` to send the\n commands to CloudSearch.\n\n If there are a lot of documents to index, it may be preferable to split the\n generation of SDF data and the actual uploading into CloudSearch. Retrieve\n the current SDF with :func:`get_sdf`. If this file is the uploaded into S3,\n it can be retrieved back afterwards for upload into CloudSearch using\n :func:`add_sdf_from_s3`.\n\n The SDF is not cleared after a :func:`commit`. If you wish to continue\n using the DocumentServiceConnection for another batch upload of commands,\n you will need to :func:`clear_sdf` first to stop the previous batch of\n commands from being uploaded again.\n\n \"\"\"\n\n def __init__(self, domain=None, endpoint=None):\n self.domain = domain\n self.endpoint = endpoint\n if not self.endpoint:\n self.endpoint = domain.doc_service_endpoint\n self.documents_batch = []\n self._sdf = None\n\n def add(self, _id, version, fields, lang='en'):\n \"\"\"\n Add a document to be processed by the DocumentService\n\n The document will not actually be added until :func:`commit` is called\n\n :type _id: string\n :param _id: A unique ID used to refer to this document.\n\n :type version: int\n :param version: Version of the document being indexed. If a file is\n being reindexed, the version should be higher than the existing one\n in CloudSearch.\n\n :type fields: dict\n :param fields: A dictionary of key-value pairs to be uploaded .\n\n :type lang: string\n :param lang: The language code the data is in. Only 'en' is currently\n supported\n \"\"\"\n\n d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,\n 'fields': fields}\n self.documents_batch.append(d)\n\n def delete(self, _id, version):\n \"\"\"\n Schedule a document to be removed from the CloudSearch service\n\n The document will not actually be scheduled for removal until :func:`commit` is called\n\n :type _id: string\n :param _id: The unique ID of this document.\n\n :type version: int\n :param version: Version of the document to remove. The delete will only\n occur if this version number is higher than the version currently\n in the index.\n \"\"\"\n\n d = {'type': 'delete', 'id': _id, 'version': version}\n self.documents_batch.append(d)\n\n def get_sdf(self):\n \"\"\"\n Generate the working set of documents in Search Data Format (SDF)\n\n :rtype: string\n :returns: JSON-formatted string of the documents in SDF\n \"\"\"\n\n return self._sdf if self._sdf else json.dumps(self.documents_batch)\n\n def clear_sdf(self):\n \"\"\"\n Clear the working documents from this DocumentServiceConnection\n\n This should be used after :func:`commit` if the connection will be reused\n for another set of documents.\n \"\"\"\n\n self._sdf = None\n self.documents_batch = []\n\n def add_sdf_from_s3(self, key_obj):\n \"\"\"\n Load an SDF from S3\n\n Using this method will result in documents added through\n :func:`add` and :func:`delete` being ignored.\n\n :type key_obj: :class:`boto.s3.key.Key`\n :param key_obj: An S3 key which contains an SDF\n \"\"\"\n #@todo:: (lucas) would be nice if this could just take an s3://uri...\"\n\n self._sdf = key_obj.get_contents_as_string()\n\n def commit(self):\n \"\"\"\n Actually send an SDF to CloudSearch for processing\n\n If an SDF file has been explicitly loaded it will be used. 
Otherwise,\n documents added through :func:`add` and :func:`delete` will be used.\n\n :rtype: :class:`CommitResponse`\n :returns: A summary of documents added and deleted\n \"\"\"\n\n sdf = self.get_sdf()\n\n if ': null' in sdf:\n boto.log.error('null value in sdf detected. This will probably raise '\n '500 error.')\n index = sdf.index(': null')\n boto.log.error(sdf[index - 100:index + 100])\n\n url = \"http://%s/2011-02-01/documents/batch\" % (self.endpoint)\n\n # Keep-alive is automatic in a post-1.0 requests world.\n session = requests.Session()\n adapter = requests.adapters.HTTPAdapter(\n pool_connections=20,\n pool_maxsize=50\n )\n # Now kludge in the right number of retries.\n # Once we're requiring ``requests>=1.2.1``, this can become an\n # initialization parameter above.\n adapter.max_retries = 5\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})\n\n return CommitResponse(r, self, sdf)\n\n\nclass CommitResponse(object):\n \"\"\"Wrapper for response to Cloudsearch document batch commit.\n\n :type response: :class:`requests.models.Response`\n :param response: Response from Cloudsearch /documents/batch API\n\n :type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`\n :param doc_service: Object containing the documents posted and methods to\n retry\n\n :raises: :class:`boto.exception.BotoServerError`\n :raises: :class:`boto.cloudsearch.document.SearchServiceException`\n :raises: :class:`boto.cloudsearch.document.EncodingError`\n :raises: :class:`boto.cloudsearch.document.ContentTooLongError`\n \"\"\"\n def __init__(self, response, doc_service, sdf):\n self.response = response\n self.doc_service = doc_service\n self.sdf = sdf\n\n try:\n self.content = json.loads(response.content)\n except:\n boto.log.error('Error indexing documents.\\nResponse Content:\\n{0}\\n\\n'\n 'SDF:\\n{1}'.format(response.content, self.sdf))\n raise boto.exception.BotoServerError(self.response.status_code, '',\n body=response.content)\n\n self.status = self.content['status']\n if self.status == 'error':\n self.errors = [e.get('message') for e in self.content.get('errors',\n [])]\n for e in self.errors:\n if \"Illegal Unicode character\" in e:\n raise EncodingError(\"Illegal Unicode character in document\")\n elif e == \"The Content-Length is too long\":\n raise ContentTooLongError(\"Content was too long\")\n else:\n self.errors = []\n\n self.adds = self.content['adds']\n self.deletes = self.content['deletes']\n self._check_num_ops('add', self.adds)\n self._check_num_ops('delete', self.deletes)\n\n def _check_num_ops(self, type_, response_num):\n \"\"\"Raise exception if number of ops in response doesn't match commit\n\n :type type_: str\n :param type_: Type of commit operation: 'add' or 'delete'\n\n :type response_num: int\n :param response_num: Number of adds or deletes in the response.\n\n :raises: :class:`boto.cloudsearch.document.CommitMismatchError`\n \"\"\"\n commit_num = len([d for d in self.doc_service.documents_batch\n if d['type'] == type_])\n\n if response_num != commit_num:\n raise CommitMismatchError(\n 'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\\\n .format(type_, commit_num, response_num))\n", "path": "boto/cloudsearch/document.py"}], "after_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. 
or its affiliates.\n# All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\nimport boto.exception\nfrom boto.compat import json\nimport requests\nimport boto\n\nclass SearchServiceException(Exception):\n pass\n\n\nclass CommitMismatchError(Exception):\n pass\n\nclass EncodingError(Exception):\n \"\"\"\n Content sent for Cloud Search indexing was incorrectly encoded.\n\n This usually happens when a document is marked as unicode but non-unicode\n characters are present.\n \"\"\"\n pass\n\nclass ContentTooLongError(Exception):\n \"\"\"\n Content sent for Cloud Search indexing was too long\n\n This will usually happen when documents queued for indexing add up to more\n than the limit allowed per upload batch (5MB)\n\n \"\"\"\n pass\n\nclass DocumentServiceConnection(object):\n \"\"\"\n A CloudSearch document service.\n\n The DocumentServiceConection is used to add, remove and update documents in\n CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document Format).\n\n To generate an appropriate SDF, use :func:`add` to add or update documents,\n as well as :func:`delete` to remove documents.\n\n Once the set of documents is ready to be index, use :func:`commit` to send the\n commands to CloudSearch.\n\n If there are a lot of documents to index, it may be preferable to split the\n generation of SDF data and the actual uploading into CloudSearch. Retrieve\n the current SDF with :func:`get_sdf`. If this file is the uploaded into S3,\n it can be retrieved back afterwards for upload into CloudSearch using\n :func:`add_sdf_from_s3`.\n\n The SDF is not cleared after a :func:`commit`. If you wish to continue\n using the DocumentServiceConnection for another batch upload of commands,\n you will need to :func:`clear_sdf` first to stop the previous batch of\n commands from being uploaded again.\n\n \"\"\"\n\n def __init__(self, domain=None, endpoint=None):\n self.domain = domain\n self.endpoint = endpoint\n if not self.endpoint:\n self.endpoint = domain.doc_service_endpoint\n self.documents_batch = []\n self._sdf = None\n\n def add(self, _id, version, fields, lang='en'):\n \"\"\"\n Add a document to be processed by the DocumentService\n\n The document will not actually be added until :func:`commit` is called\n\n :type _id: string\n :param _id: A unique ID used to refer to this document.\n\n :type version: int\n :param version: Version of the document being indexed. 
If a file is\n being reindexed, the version should be higher than the existing one\n in CloudSearch.\n\n :type fields: dict\n :param fields: A dictionary of key-value pairs to be uploaded .\n\n :type lang: string\n :param lang: The language code the data is in. Only 'en' is currently\n supported\n \"\"\"\n\n d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,\n 'fields': fields}\n self.documents_batch.append(d)\n\n def delete(self, _id, version):\n \"\"\"\n Schedule a document to be removed from the CloudSearch service\n\n The document will not actually be scheduled for removal until :func:`commit` is called\n\n :type _id: string\n :param _id: The unique ID of this document.\n\n :type version: int\n :param version: Version of the document to remove. The delete will only\n occur if this version number is higher than the version currently\n in the index.\n \"\"\"\n\n d = {'type': 'delete', 'id': _id, 'version': version}\n self.documents_batch.append(d)\n\n def get_sdf(self):\n \"\"\"\n Generate the working set of documents in Search Data Format (SDF)\n\n :rtype: string\n :returns: JSON-formatted string of the documents in SDF\n \"\"\"\n\n return self._sdf if self._sdf else json.dumps(self.documents_batch)\n\n def clear_sdf(self):\n \"\"\"\n Clear the working documents from this DocumentServiceConnection\n\n This should be used after :func:`commit` if the connection will be reused\n for another set of documents.\n \"\"\"\n\n self._sdf = None\n self.documents_batch = []\n\n def add_sdf_from_s3(self, key_obj):\n \"\"\"\n Load an SDF from S3\n\n Using this method will result in documents added through\n :func:`add` and :func:`delete` being ignored.\n\n :type key_obj: :class:`boto.s3.key.Key`\n :param key_obj: An S3 key which contains an SDF\n \"\"\"\n #@todo:: (lucas) would be nice if this could just take an s3://uri...\"\n\n self._sdf = key_obj.get_contents_as_string()\n\n def commit(self):\n \"\"\"\n Actually send an SDF to CloudSearch for processing\n\n If an SDF file has been explicitly loaded it will be used. Otherwise,\n documents added through :func:`add` and :func:`delete` will be used.\n\n :rtype: :class:`CommitResponse`\n :returns: A summary of documents added and deleted\n \"\"\"\n\n sdf = self.get_sdf()\n\n if ': null' in sdf:\n boto.log.error('null value in sdf detected. 
This will probably raise '\n '500 error.')\n index = sdf.index(': null')\n boto.log.error(sdf[index - 100:index + 100])\n\n url = \"http://%s/2011-02-01/documents/batch\" % (self.endpoint)\n\n # Keep-alive is automatic in a post-1.0 requests world.\n session = requests.Session()\n adapter = requests.adapters.HTTPAdapter(\n pool_connections=20,\n pool_maxsize=50,\n max_retries=5\n )\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})\n\n return CommitResponse(r, self, sdf)\n\n\nclass CommitResponse(object):\n \"\"\"Wrapper for response to Cloudsearch document batch commit.\n\n :type response: :class:`requests.models.Response`\n :param response: Response from Cloudsearch /documents/batch API\n\n :type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`\n :param doc_service: Object containing the documents posted and methods to\n retry\n\n :raises: :class:`boto.exception.BotoServerError`\n :raises: :class:`boto.cloudsearch.document.SearchServiceException`\n :raises: :class:`boto.cloudsearch.document.EncodingError`\n :raises: :class:`boto.cloudsearch.document.ContentTooLongError`\n \"\"\"\n def __init__(self, response, doc_service, sdf):\n self.response = response\n self.doc_service = doc_service\n self.sdf = sdf\n\n try:\n self.content = json.loads(response.content)\n except:\n boto.log.error('Error indexing documents.\\nResponse Content:\\n{0}\\n\\n'\n 'SDF:\\n{1}'.format(response.content, self.sdf))\n raise boto.exception.BotoServerError(self.response.status_code, '',\n body=response.content)\n\n self.status = self.content['status']\n if self.status == 'error':\n self.errors = [e.get('message') for e in self.content.get('errors',\n [])]\n for e in self.errors:\n if \"Illegal Unicode character\" in e:\n raise EncodingError(\"Illegal Unicode character in document\")\n elif e == \"The Content-Length is too long\":\n raise ContentTooLongError(\"Content was too long\")\n else:\n self.errors = []\n\n self.adds = self.content['adds']\n self.deletes = self.content['deletes']\n self._check_num_ops('add', self.adds)\n self._check_num_ops('delete', self.deletes)\n\n def _check_num_ops(self, type_, response_num):\n \"\"\"Raise exception if number of ops in response doesn't match commit\n\n :type type_: str\n :param type_: Type of commit operation: 'add' or 'delete'\n\n :type response_num: int\n :param response_num: Number of adds or deletes in the response.\n\n :raises: :class:`boto.cloudsearch.document.CommitMismatchError`\n \"\"\"\n commit_num = len([d for d in self.doc_service.documents_batch\n if d['type'] == type_])\n\n if response_num != commit_num:\n raise CommitMismatchError(\n 'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\\\n .format(type_, commit_num, response_num))\n", "path": "boto/cloudsearch/document.py"}]} | 3,314 | 189 |
gh_patches_debug_12052 | rasdani/github-patches | git_diff | pallets__click-993 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't build documentation with python 3
```
> make html
...
Exception occurred:
File "/home/user/click-6.7/docs/clickdoctools.py", line 73, in isolation
if isinstance(input, unicode):
NameError: name 'unicode' is not defined
```
--- END ISSUE ---
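The traceback points at a Python 2 builtin (`unicode`) that does not exist on Python 3. A minimal sketch of the conventional compatibility shim for this situation is shown below; the helper function at the end is only a usage illustration, not code from the click repository.

```python
import sys

PY2 = sys.version_info[0] == 2

# On Python 2 the broad text type is `unicode`; on Python 3 it is `str`.
if PY2:
    text_type = unicode  # noqa: F821 -- only defined on Python 2
else:
    text_type = str


def needs_encoding(value):
    # Runs on both interpreters without naming `unicode` directly.
    return isinstance(value, text_type)
```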
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/clickdoctools.py`
Content:
```
1 import os
2 import sys
3 import click
4 import shutil
5 import tempfile
6 import contextlib
7 import subprocess
8
9 try:
10 from StringIO import StringIO
11 except ImportError:
12 from io import StringIO
13
14 from docutils import nodes
15 from docutils.statemachine import ViewList
16
17 from sphinx.domains import Domain
18 from sphinx.util.compat import Directive
19
20
21 class EchoingStdin(object):
22
23 def __init__(self, input, output):
24 self._input = input
25 self._output = output
26
27 def __getattr__(self, x):
28 return getattr(self._input, x)
29
30 def _echo(self, rv):
31 mark = False
32 if rv.endswith('\xff'):
33 rv = rv[:-1]
34 mark = True
35 self._output.write(rv)
36 if mark:
37 self._output.write('^D\n')
38 return rv
39
40 def read(self, n=-1):
41 return self._echo(self._input.read(n))
42
43 def readline(self, n=-1):
44 return self._echo(self._input.readline(n))
45
46 def readlines(self):
47 return [self._echo(x) for x in self._input.readlines()]
48
49 def __iter__(self):
50 return iter(self._echo(x) for x in self._input)
51
52
53 @contextlib.contextmanager
54 def fake_modules():
55 old_call = subprocess.call
56 def dummy_call(*args, **kwargs):
57 with tempfile.TemporaryFile('wb+') as f:
58 kwargs['stdout'] = f
59 kwargs['stderr'] = f
60 rv = subprocess.Popen(*args, **kwargs).wait()
61 f.seek(0)
62 click.echo(f.read().decode('utf-8', 'replace').rstrip())
63 return rv
64 subprocess.call = dummy_call
65 try:
66 yield
67 finally:
68 subprocess.call = old_call
69
70
71 @contextlib.contextmanager
72 def isolation(input=None, env=None):
73 if isinstance(input, unicode):
74 input = input.encode('utf-8')
75 input = StringIO(input or '')
76 output = StringIO()
77 sys.stdin = EchoingStdin(input, output)
78 sys.stdin.encoding = 'utf-8'
79
80 def visible_input(prompt=None):
81 sys.stdout.write(prompt or '')
82 val = input.readline().rstrip('\r\n')
83 sys.stdout.write(val + '\n')
84 sys.stdout.flush()
85 return val
86
87 def hidden_input(prompt=None):
88 sys.stdout.write((prompt or '') + '\n')
89 sys.stdout.flush()
90 return input.readline().rstrip('\r\n')
91
92 sys.stdout = output
93 sys.stderr = output
94 old_visible_prompt_func = click.termui.visible_prompt_func
95 old_hidden_prompt_func = click.termui.hidden_prompt_func
96 click.termui.visible_prompt_func = visible_input
97 click.termui.hidden_prompt_func = hidden_input
98
99 old_env = {}
100 try:
101 if env:
102 for key, value in env.iteritems():
103 old_env[key] = os.environ.get(value)
104 os.environ[key] = value
105 yield output
106 finally:
107 for key, value in old_env.iteritems():
108 if value is None:
109 try:
110 del os.environ[key]
111 except Exception:
112 pass
113 else:
114 os.environ[key] = value
115 sys.stdout = sys.__stdout__
116 sys.stderr = sys.__stderr__
117 click.termui.visible_prompt_func = old_visible_prompt_func
118 click.termui.hidden_prompt_func = old_hidden_prompt_func
119
120
121 @contextlib.contextmanager
122 def isolated_filesystem():
123 cwd = os.getcwd()
124 t = tempfile.mkdtemp()
125 os.chdir(t)
126 try:
127 yield
128 finally:
129 os.chdir(cwd)
130 try:
131 shutil.rmtree(t)
132 except (OSError, IOError):
133 pass
134
135
136 class ExampleRunner(object):
137
138 def __init__(self):
139 self.namespace = {
140 'click': click,
141 '__file__': 'dummy.py',
142 }
143
144 def declare(self, source):
145 with fake_modules():
146 code = compile(source, '<docs>', 'exec')
147 eval(code, self.namespace)
148
149 def run(self, source):
150 code = compile(source, '<docs>', 'exec')
151 buffer = []
152
153 def invoke(cmd, args=None, prog_name=None,
154 input=None, terminate_input=False, env=None,
155 **extra):
156 if env:
157 for key, value in sorted(env.items()):
158 if ' ' in value:
159 value = '"%s"' % value
160 buffer.append('$ export %s=%s' % (key, value))
161 args = args or []
162 if prog_name is None:
163 prog_name = cmd.name.replace('_', '-')
164 buffer.append(('$ %s %s' % (
165 prog_name,
166 ' '.join(('"%s"' % x) if ' ' in x else x for x in args)
167 )).rstrip())
168 if isinstance(input, (tuple, list)):
169 input = '\n'.join(input) + '\n'
170 if terminate_input:
171 input += '\xff'
172 with isolation(input=input, env=env) as output:
173 try:
174 cmd.main(args=args, prog_name=prog_name.split()[-1],
175 **extra)
176 except SystemExit:
177 pass
178 buffer.extend(output.getvalue().splitlines())
179
180 def println(text=''):
181 buffer.append(text)
182
183 eval(code, self.namespace, {
184 'invoke': invoke,
185 'println': println,
186 'isolated_filesystem': isolated_filesystem,
187 })
188 return buffer
189
190 def close(self):
191 pass
192
193
194 def parse_rst(state, content_offset, doc):
195 node = nodes.section()
196 # hack around title style bookkeeping
197 surrounding_title_styles = state.memo.title_styles
198 surrounding_section_level = state.memo.section_level
199 state.memo.title_styles = []
200 state.memo.section_level = 0
201 state.nested_parse(doc, content_offset, node, match_titles=1)
202 state.memo.title_styles = surrounding_title_styles
203 state.memo.section_level = surrounding_section_level
204 return node.children
205
206
207 def get_example_runner(document):
208 runner = getattr(document, 'click_example_runner', None)
209 if runner is None:
210 runner = document.click_example_runner = ExampleRunner()
211 return runner
212
213
214 class ExampleDirective(Directive):
215 has_content = True
216 required_arguments = 0
217 optional_arguments = 0
218 final_argument_whitespace = False
219
220 def run(self):
221 doc = ViewList()
222 runner = get_example_runner(self.state.document)
223 try:
224 runner.declare('\n'.join(self.content))
225 except:
226 runner.close()
227 raise
228 doc.append('.. sourcecode:: python', '')
229 doc.append('', '')
230 for line in self.content:
231 doc.append(' ' + line, '')
232 return parse_rst(self.state, self.content_offset, doc)
233
234
235 class RunExampleDirective(Directive):
236 has_content = True
237 required_arguments = 0
238 optional_arguments = 0
239 final_argument_whitespace = False
240
241 def run(self):
242 doc = ViewList()
243 runner = get_example_runner(self.state.document)
244 try:
245 rv = runner.run('\n'.join(self.content))
246 except:
247 runner.close()
248 raise
249 doc.append('.. sourcecode:: text', '')
250 doc.append('', '')
251 for line in rv:
252 doc.append(' ' + line, '')
253 return parse_rst(self.state, self.content_offset, doc)
254
255
256 class ClickDomain(Domain):
257 name = 'click'
258 label = 'Click'
259 directives = {
260 'example': ExampleDirective,
261 'run': RunExampleDirective,
262 }
263
264
265 def delete_example_runner_state(app, doctree):
266 runner = getattr(doctree, 'click_example_runner', None)
267 if runner is not None:
268 runner.close()
269 del doctree.click_example_runner
270
271
272 def setup(app):
273 app.add_domain(ClickDomain)
274
275 app.connect('doctree-read', delete_example_runner_state)
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/clickdoctools.py b/docs/clickdoctools.py
--- a/docs/clickdoctools.py
+++ b/docs/clickdoctools.py
@@ -17,6 +17,13 @@
from sphinx.domains import Domain
from sphinx.util.compat import Directive
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+ text_type = unicode
+else:
+ text_type = str
+
class EchoingStdin(object):
@@ -70,7 +77,7 @@
@contextlib.contextmanager
def isolation(input=None, env=None):
- if isinstance(input, unicode):
+ if isinstance(input, text_type):
input = input.encode('utf-8')
input = StringIO(input or '')
output = StringIO()
| {"golden_diff": "diff --git a/docs/clickdoctools.py b/docs/clickdoctools.py\n--- a/docs/clickdoctools.py\n+++ b/docs/clickdoctools.py\n@@ -17,6 +17,13 @@\n from sphinx.domains import Domain\n from sphinx.util.compat import Directive\n \n+PY2 = sys.version_info[0] == 2\n+\n+if PY2:\n+ text_type = unicode\n+else:\n+ text_type = str\n+ \n \n class EchoingStdin(object):\n \n@@ -70,7 +77,7 @@\n \n @contextlib.contextmanager\n def isolation(input=None, env=None):\n- if isinstance(input, unicode):\n+ if isinstance(input, text_type):\n input = input.encode('utf-8')\n input = StringIO(input or '')\n output = StringIO()\n", "issue": "Can't build documentation with python 3\n```\r\n> make html\r\n...\r\nException occurred:\r\n File \"/home/user/click-6.7/docs/clickdoctools.py\", line 73, in isolation\r\n if isinstance(input, unicode):\r\nNameError: name 'unicode' is not defined\r\n```\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport click\nimport shutil\nimport tempfile\nimport contextlib\nimport subprocess\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nfrom docutils import nodes\nfrom docutils.statemachine import ViewList\n\nfrom sphinx.domains import Domain\nfrom sphinx.util.compat import Directive\n\n\nclass EchoingStdin(object):\n\n def __init__(self, input, output):\n self._input = input\n self._output = output\n\n def __getattr__(self, x):\n return getattr(self._input, x)\n\n def _echo(self, rv):\n mark = False\n if rv.endswith('\\xff'):\n rv = rv[:-1]\n mark = True\n self._output.write(rv)\n if mark:\n self._output.write('^D\\n')\n return rv\n\n def read(self, n=-1):\n return self._echo(self._input.read(n))\n\n def readline(self, n=-1):\n return self._echo(self._input.readline(n))\n\n def readlines(self):\n return [self._echo(x) for x in self._input.readlines()]\n\n def __iter__(self):\n return iter(self._echo(x) for x in self._input)\n\n\[email protected]\ndef fake_modules():\n old_call = subprocess.call\n def dummy_call(*args, **kwargs):\n with tempfile.TemporaryFile('wb+') as f:\n kwargs['stdout'] = f\n kwargs['stderr'] = f\n rv = subprocess.Popen(*args, **kwargs).wait()\n f.seek(0)\n click.echo(f.read().decode('utf-8', 'replace').rstrip())\n return rv\n subprocess.call = dummy_call\n try:\n yield\n finally:\n subprocess.call = old_call\n\n\[email protected]\ndef isolation(input=None, env=None):\n if isinstance(input, unicode):\n input = input.encode('utf-8')\n input = StringIO(input or '')\n output = StringIO()\n sys.stdin = EchoingStdin(input, output)\n sys.stdin.encoding = 'utf-8'\n\n def visible_input(prompt=None):\n sys.stdout.write(prompt or '')\n val = input.readline().rstrip('\\r\\n')\n sys.stdout.write(val + '\\n')\n sys.stdout.flush()\n return val\n\n def hidden_input(prompt=None):\n sys.stdout.write((prompt or '') + '\\n')\n sys.stdout.flush()\n return input.readline().rstrip('\\r\\n')\n\n sys.stdout = output\n sys.stderr = output\n old_visible_prompt_func = click.termui.visible_prompt_func\n old_hidden_prompt_func = click.termui.hidden_prompt_func\n click.termui.visible_prompt_func = visible_input\n click.termui.hidden_prompt_func = hidden_input\n\n old_env = {}\n try:\n if env:\n for key, value in env.iteritems():\n old_env[key] = os.environ.get(value)\n os.environ[key] = value\n yield output\n finally:\n for key, value in old_env.iteritems():\n if value is None:\n try:\n del os.environ[key]\n except Exception:\n pass\n else:\n os.environ[key] = value\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n 
click.termui.visible_prompt_func = old_visible_prompt_func\n click.termui.hidden_prompt_func = old_hidden_prompt_func\n\n\[email protected]\ndef isolated_filesystem():\n cwd = os.getcwd()\n t = tempfile.mkdtemp()\n os.chdir(t)\n try:\n yield\n finally:\n os.chdir(cwd)\n try:\n shutil.rmtree(t)\n except (OSError, IOError):\n pass\n\n\nclass ExampleRunner(object):\n\n def __init__(self):\n self.namespace = {\n 'click': click,\n '__file__': 'dummy.py',\n }\n\n def declare(self, source):\n with fake_modules():\n code = compile(source, '<docs>', 'exec')\n eval(code, self.namespace)\n\n def run(self, source):\n code = compile(source, '<docs>', 'exec')\n buffer = []\n\n def invoke(cmd, args=None, prog_name=None,\n input=None, terminate_input=False, env=None,\n **extra):\n if env:\n for key, value in sorted(env.items()):\n if ' ' in value:\n value = '\"%s\"' % value\n buffer.append('$ export %s=%s' % (key, value))\n args = args or []\n if prog_name is None:\n prog_name = cmd.name.replace('_', '-')\n buffer.append(('$ %s %s' % (\n prog_name,\n ' '.join(('\"%s\"' % x) if ' ' in x else x for x in args)\n )).rstrip())\n if isinstance(input, (tuple, list)):\n input = '\\n'.join(input) + '\\n'\n if terminate_input:\n input += '\\xff'\n with isolation(input=input, env=env) as output:\n try:\n cmd.main(args=args, prog_name=prog_name.split()[-1],\n **extra)\n except SystemExit:\n pass\n buffer.extend(output.getvalue().splitlines())\n\n def println(text=''):\n buffer.append(text)\n\n eval(code, self.namespace, {\n 'invoke': invoke,\n 'println': println,\n 'isolated_filesystem': isolated_filesystem,\n })\n return buffer\n\n def close(self):\n pass\n\n\ndef parse_rst(state, content_offset, doc):\n node = nodes.section()\n # hack around title style bookkeeping\n surrounding_title_styles = state.memo.title_styles\n surrounding_section_level = state.memo.section_level\n state.memo.title_styles = []\n state.memo.section_level = 0\n state.nested_parse(doc, content_offset, node, match_titles=1)\n state.memo.title_styles = surrounding_title_styles\n state.memo.section_level = surrounding_section_level\n return node.children\n\n\ndef get_example_runner(document):\n runner = getattr(document, 'click_example_runner', None)\n if runner is None:\n runner = document.click_example_runner = ExampleRunner()\n return runner\n\n\nclass ExampleDirective(Directive):\n has_content = True\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n\n def run(self):\n doc = ViewList()\n runner = get_example_runner(self.state.document)\n try:\n runner.declare('\\n'.join(self.content))\n except:\n runner.close()\n raise\n doc.append('.. sourcecode:: python', '')\n doc.append('', '')\n for line in self.content:\n doc.append(' ' + line, '')\n return parse_rst(self.state, self.content_offset, doc)\n\n\nclass RunExampleDirective(Directive):\n has_content = True\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n\n def run(self):\n doc = ViewList()\n runner = get_example_runner(self.state.document)\n try:\n rv = runner.run('\\n'.join(self.content))\n except:\n runner.close()\n raise\n doc.append('.. 
sourcecode:: text', '')\n doc.append('', '')\n for line in rv:\n doc.append(' ' + line, '')\n return parse_rst(self.state, self.content_offset, doc)\n\n\nclass ClickDomain(Domain):\n name = 'click'\n label = 'Click'\n directives = {\n 'example': ExampleDirective,\n 'run': RunExampleDirective,\n }\n\n\ndef delete_example_runner_state(app, doctree):\n runner = getattr(doctree, 'click_example_runner', None)\n if runner is not None:\n runner.close()\n del doctree.click_example_runner\n\n\ndef setup(app):\n app.add_domain(ClickDomain)\n\n app.connect('doctree-read', delete_example_runner_state)\n", "path": "docs/clickdoctools.py"}], "after_files": [{"content": "import os\nimport sys\nimport click\nimport shutil\nimport tempfile\nimport contextlib\nimport subprocess\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nfrom docutils import nodes\nfrom docutils.statemachine import ViewList\n\nfrom sphinx.domains import Domain\nfrom sphinx.util.compat import Directive\n\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n text_type = unicode\nelse:\n text_type = str\n \n\nclass EchoingStdin(object):\n\n def __init__(self, input, output):\n self._input = input\n self._output = output\n\n def __getattr__(self, x):\n return getattr(self._input, x)\n\n def _echo(self, rv):\n mark = False\n if rv.endswith('\\xff'):\n rv = rv[:-1]\n mark = True\n self._output.write(rv)\n if mark:\n self._output.write('^D\\n')\n return rv\n\n def read(self, n=-1):\n return self._echo(self._input.read(n))\n\n def readline(self, n=-1):\n return self._echo(self._input.readline(n))\n\n def readlines(self):\n return [self._echo(x) for x in self._input.readlines()]\n\n def __iter__(self):\n return iter(self._echo(x) for x in self._input)\n\n\[email protected]\ndef fake_modules():\n old_call = subprocess.call\n def dummy_call(*args, **kwargs):\n with tempfile.TemporaryFile('wb+') as f:\n kwargs['stdout'] = f\n kwargs['stderr'] = f\n rv = subprocess.Popen(*args, **kwargs).wait()\n f.seek(0)\n click.echo(f.read().decode('utf-8', 'replace').rstrip())\n return rv\n subprocess.call = dummy_call\n try:\n yield\n finally:\n subprocess.call = old_call\n\n\[email protected]\ndef isolation(input=None, env=None):\n if isinstance(input, text_type):\n input = input.encode('utf-8')\n input = StringIO(input or '')\n output = StringIO()\n sys.stdin = EchoingStdin(input, output)\n sys.stdin.encoding = 'utf-8'\n\n def visible_input(prompt=None):\n sys.stdout.write(prompt or '')\n val = input.readline().rstrip('\\r\\n')\n sys.stdout.write(val + '\\n')\n sys.stdout.flush()\n return val\n\n def hidden_input(prompt=None):\n sys.stdout.write((prompt or '') + '\\n')\n sys.stdout.flush()\n return input.readline().rstrip('\\r\\n')\n\n sys.stdout = output\n sys.stderr = output\n old_visible_prompt_func = click.termui.visible_prompt_func\n old_hidden_prompt_func = click.termui.hidden_prompt_func\n click.termui.visible_prompt_func = visible_input\n click.termui.hidden_prompt_func = hidden_input\n\n old_env = {}\n try:\n if env:\n for key, value in env.iteritems():\n old_env[key] = os.environ.get(value)\n os.environ[key] = value\n yield output\n finally:\n for key, value in old_env.iteritems():\n if value is None:\n try:\n del os.environ[key]\n except Exception:\n pass\n else:\n os.environ[key] = value\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n click.termui.visible_prompt_func = old_visible_prompt_func\n click.termui.hidden_prompt_func = old_hidden_prompt_func\n\n\[email protected]\ndef isolated_filesystem():\n 
cwd = os.getcwd()\n t = tempfile.mkdtemp()\n os.chdir(t)\n try:\n yield\n finally:\n os.chdir(cwd)\n try:\n shutil.rmtree(t)\n except (OSError, IOError):\n pass\n\n\nclass ExampleRunner(object):\n\n def __init__(self):\n self.namespace = {\n 'click': click,\n '__file__': 'dummy.py',\n }\n\n def declare(self, source):\n with fake_modules():\n code = compile(source, '<docs>', 'exec')\n eval(code, self.namespace)\n\n def run(self, source):\n code = compile(source, '<docs>', 'exec')\n buffer = []\n\n def invoke(cmd, args=None, prog_name=None,\n input=None, terminate_input=False, env=None,\n **extra):\n if env:\n for key, value in sorted(env.items()):\n if ' ' in value:\n value = '\"%s\"' % value\n buffer.append('$ export %s=%s' % (key, value))\n args = args or []\n if prog_name is None:\n prog_name = cmd.name.replace('_', '-')\n buffer.append(('$ %s %s' % (\n prog_name,\n ' '.join(('\"%s\"' % x) if ' ' in x else x for x in args)\n )).rstrip())\n if isinstance(input, (tuple, list)):\n input = '\\n'.join(input) + '\\n'\n if terminate_input:\n input += '\\xff'\n with isolation(input=input, env=env) as output:\n try:\n cmd.main(args=args, prog_name=prog_name.split()[-1],\n **extra)\n except SystemExit:\n pass\n buffer.extend(output.getvalue().splitlines())\n\n def println(text=''):\n buffer.append(text)\n\n eval(code, self.namespace, {\n 'invoke': invoke,\n 'println': println,\n 'isolated_filesystem': isolated_filesystem,\n })\n return buffer\n\n def close(self):\n pass\n\n\ndef parse_rst(state, content_offset, doc):\n node = nodes.section()\n # hack around title style bookkeeping\n surrounding_title_styles = state.memo.title_styles\n surrounding_section_level = state.memo.section_level\n state.memo.title_styles = []\n state.memo.section_level = 0\n state.nested_parse(doc, content_offset, node, match_titles=1)\n state.memo.title_styles = surrounding_title_styles\n state.memo.section_level = surrounding_section_level\n return node.children\n\n\ndef get_example_runner(document):\n runner = getattr(document, 'click_example_runner', None)\n if runner is None:\n runner = document.click_example_runner = ExampleRunner()\n return runner\n\n\nclass ExampleDirective(Directive):\n has_content = True\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n\n def run(self):\n doc = ViewList()\n runner = get_example_runner(self.state.document)\n try:\n runner.declare('\\n'.join(self.content))\n except:\n runner.close()\n raise\n doc.append('.. sourcecode:: python', '')\n doc.append('', '')\n for line in self.content:\n doc.append(' ' + line, '')\n return parse_rst(self.state, self.content_offset, doc)\n\n\nclass RunExampleDirective(Directive):\n has_content = True\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n\n def run(self):\n doc = ViewList()\n runner = get_example_runner(self.state.document)\n try:\n rv = runner.run('\\n'.join(self.content))\n except:\n runner.close()\n raise\n doc.append('.. 
sourcecode:: text', '')\n doc.append('', '')\n for line in rv:\n doc.append(' ' + line, '')\n return parse_rst(self.state, self.content_offset, doc)\n\n\nclass ClickDomain(Domain):\n name = 'click'\n label = 'Click'\n directives = {\n 'example': ExampleDirective,\n 'run': RunExampleDirective,\n }\n\n\ndef delete_example_runner_state(app, doctree):\n runner = getattr(doctree, 'click_example_runner', None)\n if runner is not None:\n runner.close()\n del doctree.click_example_runner\n\n\ndef setup(app):\n app.add_domain(ClickDomain)\n\n app.connect('doctree-read', delete_example_runner_state)\n", "path": "docs/clickdoctools.py"}]} | 2,750 | 183 |
gh_patches_debug_11476 | rasdani/github-patches | git_diff | psf__black-3257 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
(🐞) Windows line endings not preserved with blackd
**Describe the bug**
```
👉 blackd --bind-port 9090 &
👉 curl -s -XPOST "localhost:9090" -d "1`r`n" -o test.py
```
(NOTE: I am using \` here because the backtick is PowerShell's escape character on Windows, so \`r\`n produces a CRLF; feel free to replace the data with `-d 1\r\n` if you are using a shell like `fish` or something)
`test.py` contents are now `1\n` instead of the expected `1\r\n`
**Environment**
- Black's version: 22.8.0
- OS and Python version: Windows 10, Python 3.10.6
**Additional context**
PR adding support for this https://github.com/psf/black/pull/288
--- END ISSUE ---
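A way to reason about the report: Black normalizes line endings to `\n`, so a server that wants to round-trip Windows input has to notice `\r\n` in the request body and re-apply it to the formatted result. A minimal, hypothetical sketch of that idea (not blackd's actual handler code) could look like this:

```python
def restore_crlf(original: str, formatted: str) -> str:
    # Sketch only: if the submitted source used Windows line endings,
    # re-apply them to the formatter's LF-normalized output.
    first_newline = original.find("\n")
    used_crlf = first_newline > 0 and original[first_newline - 1] == "\r"
    if used_crlf:
        return formatted.replace("\n", "\r\n")
    return formatted
```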
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/blackd/__init__.py`
Content:
```
1 import asyncio
2 import logging
3 from concurrent.futures import Executor, ProcessPoolExecutor
4 from datetime import datetime
5 from functools import partial
6 from multiprocessing import freeze_support
7 from typing import Set, Tuple
8
9 try:
10 from aiohttp import web
11
12 from .middlewares import cors
13 except ImportError as ie:
14 raise ImportError(
15 f"aiohttp dependency is not installed: {ie}. "
16 + "Please re-install black with the '[d]' extra install "
17 + "to obtain aiohttp_cors: `pip install black[d]`"
18 ) from None
19
20 import click
21
22 import black
23 from _black_version import version as __version__
24 from black.concurrency import maybe_install_uvloop
25
26 # This is used internally by tests to shut down the server prematurely
27 _stop_signal = asyncio.Event()
28
29 # Request headers
30 PROTOCOL_VERSION_HEADER = "X-Protocol-Version"
31 LINE_LENGTH_HEADER = "X-Line-Length"
32 PYTHON_VARIANT_HEADER = "X-Python-Variant"
33 SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization"
34 SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma"
35 PREVIEW = "X-Preview"
36 FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe"
37 DIFF_HEADER = "X-Diff"
38
39 BLACK_HEADERS = [
40 PROTOCOL_VERSION_HEADER,
41 LINE_LENGTH_HEADER,
42 PYTHON_VARIANT_HEADER,
43 SKIP_STRING_NORMALIZATION_HEADER,
44 SKIP_MAGIC_TRAILING_COMMA,
45 PREVIEW,
46 FAST_OR_SAFE_HEADER,
47 DIFF_HEADER,
48 ]
49
50 # Response headers
51 BLACK_VERSION_HEADER = "X-Black-Version"
52
53
54 class InvalidVariantHeader(Exception):
55 pass
56
57
58 @click.command(context_settings={"help_option_names": ["-h", "--help"]})
59 @click.option(
60 "--bind-host", type=str, help="Address to bind the server to.", default="localhost"
61 )
62 @click.option("--bind-port", type=int, help="Port to listen on", default=45484)
63 @click.version_option(version=black.__version__)
64 def main(bind_host: str, bind_port: int) -> None:
65 logging.basicConfig(level=logging.INFO)
66 app = make_app()
67 ver = black.__version__
68 black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}")
69 web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)
70
71
72 def make_app() -> web.Application:
73 app = web.Application(
74 middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))]
75 )
76 executor = ProcessPoolExecutor()
77 app.add_routes([web.post("/", partial(handle, executor=executor))])
78 return app
79
80
81 async def handle(request: web.Request, executor: Executor) -> web.Response:
82 headers = {BLACK_VERSION_HEADER: __version__}
83 try:
84 if request.headers.get(PROTOCOL_VERSION_HEADER, "1") != "1":
85 return web.Response(
86 status=501, text="This server only supports protocol version 1"
87 )
88 try:
89 line_length = int(
90 request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)
91 )
92 except ValueError:
93 return web.Response(status=400, text="Invalid line length header value")
94
95 if PYTHON_VARIANT_HEADER in request.headers:
96 value = request.headers[PYTHON_VARIANT_HEADER]
97 try:
98 pyi, versions = parse_python_variant_header(value)
99 except InvalidVariantHeader as e:
100 return web.Response(
101 status=400,
102 text=f"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}",
103 )
104 else:
105 pyi = False
106 versions = set()
107
108 skip_string_normalization = bool(
109 request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)
110 )
111 skip_magic_trailing_comma = bool(
112 request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)
113 )
114 preview = bool(request.headers.get(PREVIEW, False))
115 fast = False
116 if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast":
117 fast = True
118 mode = black.FileMode(
119 target_versions=versions,
120 is_pyi=pyi,
121 line_length=line_length,
122 string_normalization=not skip_string_normalization,
123 magic_trailing_comma=not skip_magic_trailing_comma,
124 preview=preview,
125 )
126 req_bytes = await request.content.read()
127 charset = request.charset if request.charset is not None else "utf8"
128 req_str = req_bytes.decode(charset)
129 then = datetime.utcnow()
130
131 loop = asyncio.get_event_loop()
132 formatted_str = await loop.run_in_executor(
133 executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)
134 )
135
136 # Only output the diff in the HTTP response
137 only_diff = bool(request.headers.get(DIFF_HEADER, False))
138 if only_diff:
139 now = datetime.utcnow()
140 src_name = f"In\t{then} +0000"
141 dst_name = f"Out\t{now} +0000"
142 loop = asyncio.get_event_loop()
143 formatted_str = await loop.run_in_executor(
144 executor,
145 partial(black.diff, req_str, formatted_str, src_name, dst_name),
146 )
147
148 return web.Response(
149 content_type=request.content_type,
150 charset=charset,
151 headers=headers,
152 text=formatted_str,
153 )
154 except black.NothingChanged:
155 return web.Response(status=204, headers=headers)
156 except black.InvalidInput as e:
157 return web.Response(status=400, headers=headers, text=str(e))
158 except Exception as e:
159 logging.exception("Exception during handling a request")
160 return web.Response(status=500, headers=headers, text=str(e))
161
162
163 def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:
164 if value == "pyi":
165 return True, set()
166 else:
167 versions = set()
168 for version in value.split(","):
169 if version.startswith("py"):
170 version = version[len("py") :]
171 if "." in version:
172 major_str, *rest = version.split(".")
173 else:
174 major_str = version[0]
175 rest = [version[1:]] if len(version) > 1 else []
176 try:
177 major = int(major_str)
178 if major not in (2, 3):
179 raise InvalidVariantHeader("major version must be 2 or 3")
180 if len(rest) > 0:
181 minor = int(rest[0])
182 if major == 2:
183 raise InvalidVariantHeader("Python 2 is not supported")
184 else:
185 # Default to lowest supported minor version.
186 minor = 7 if major == 2 else 3
187 version_str = f"PY{major}{minor}"
188 if major == 3 and not hasattr(black.TargetVersion, version_str):
189 raise InvalidVariantHeader(f"3.{minor} is not supported")
190 versions.add(black.TargetVersion[version_str])
191 except (KeyError, ValueError):
192 raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'") from None
193 return False, versions
194
195
196 def patched_main() -> None:
197 maybe_install_uvloop()
198 freeze_support()
199 black.patch_click()
200 main()
201
202
203 if __name__ == "__main__":
204 patched_main()
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py
--- a/src/blackd/__init__.py
+++ b/src/blackd/__init__.py
@@ -133,6 +133,13 @@
executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)
)
+ # Preserve CRLF line endings
+ if req_str[req_str.find("\n") - 1] == "\r":
+ formatted_str = formatted_str.replace("\n", "\r\n")
+ # If, after swapping line endings, nothing changed, then say so
+ if formatted_str == req_str:
+ raise black.NothingChanged
+
# Only output the diff in the HTTP response
only_diff = bool(request.headers.get(DIFF_HEADER, False))
if only_diff:
| {"golden_diff": "diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py\n--- a/src/blackd/__init__.py\n+++ b/src/blackd/__init__.py\n@@ -133,6 +133,13 @@\n executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)\n )\n \n+ # Preserve CRLF line endings\n+ if req_str[req_str.find(\"\\n\") - 1] == \"\\r\":\n+ formatted_str = formatted_str.replace(\"\\n\", \"\\r\\n\")\n+ # If, after swapping line endings, nothing changed, then say so\n+ if formatted_str == req_str:\n+ raise black.NothingChanged\n+\n # Only output the diff in the HTTP response\n only_diff = bool(request.headers.get(DIFF_HEADER, False))\n if only_diff:\n", "issue": "(\ud83d\udc1e) Windows line endings not preserved with blackd\n<!--\r\nPlease make sure that the bug is not already fixed either in newer versions or the\r\ncurrent development version. To confirm this, you have three options:\r\n\r\n1. Update Black's version if a newer release exists: `pip install -U black`\r\n2. Use the online formatter at <https://black.vercel.app/?version=main>, which will use\r\n the latest main branch.\r\n3. Or run _Black_ on your machine:\r\n - create a new virtualenv (make sure it's the same Python version);\r\n - clone this repository;\r\n - run `pip install -e .[d]`;\r\n - run `pip install -r test_requirements.txt`\r\n - make sure it's sane by running `python -m pytest`; and\r\n - run `black` like you did last time.\r\n-->\r\n\r\n**Describe the bug**\r\n```\r\n\ud83d\udc49 blackd --bind-port 9090 &\r\n\ud83d\udc49 curl -s -XPOST \"localhost:9090\" -d \"1`r`n\" -o test.py\r\n```\r\n(NOTE: I am using \\` here to demonstrate a Windows moment, feel free to replace the data with `-d 1\\r\\n` if you are using a shell like `fish` or something)\r\n\r\n`test.py` contents are now `1\\n` instead of the expected `1\\r\\n`\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n\r\n\r\n**Environment**\r\n\r\n<!-- Please complete the following information: -->\r\n\r\n- Black's version: <!-- e.g. [main] --> 22.8.0\r\n- OS and Python version: <!-- e.g. [Linux/Python 3.7.4rc1] --> Windows 10 python 3.10.6\r\n\r\n**Additional context**\r\n\r\n<!-- Add any other context about the problem here. -->\r\nPR adding support for this https://github.com/psf/black/pull/288\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\nfrom concurrent.futures import Executor, ProcessPoolExecutor\nfrom datetime import datetime\nfrom functools import partial\nfrom multiprocessing import freeze_support\nfrom typing import Set, Tuple\n\ntry:\n from aiohttp import web\n\n from .middlewares import cors\nexcept ImportError as ie:\n raise ImportError(\n f\"aiohttp dependency is not installed: {ie}. 
\"\n + \"Please re-install black with the '[d]' extra install \"\n + \"to obtain aiohttp_cors: `pip install black[d]`\"\n ) from None\n\nimport click\n\nimport black\nfrom _black_version import version as __version__\nfrom black.concurrency import maybe_install_uvloop\n\n# This is used internally by tests to shut down the server prematurely\n_stop_signal = asyncio.Event()\n\n# Request headers\nPROTOCOL_VERSION_HEADER = \"X-Protocol-Version\"\nLINE_LENGTH_HEADER = \"X-Line-Length\"\nPYTHON_VARIANT_HEADER = \"X-Python-Variant\"\nSKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\nSKIP_MAGIC_TRAILING_COMMA = \"X-Skip-Magic-Trailing-Comma\"\nPREVIEW = \"X-Preview\"\nFAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\nDIFF_HEADER = \"X-Diff\"\n\nBLACK_HEADERS = [\n PROTOCOL_VERSION_HEADER,\n LINE_LENGTH_HEADER,\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n SKIP_MAGIC_TRAILING_COMMA,\n PREVIEW,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n]\n\n# Response headers\nBLACK_VERSION_HEADER = \"X-Black-Version\"\n\n\nclass InvalidVariantHeader(Exception):\n pass\n\n\[email protected](context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\[email protected](\n \"--bind-host\", type=str, help=\"Address to bind the server to.\", default=\"localhost\"\n)\[email protected](\"--bind-port\", type=int, help=\"Port to listen on\", default=45484)\[email protected]_option(version=black.__version__)\ndef main(bind_host: str, bind_port: int) -> None:\n logging.basicConfig(level=logging.INFO)\n app = make_app()\n ver = black.__version__\n black.out(f\"blackd version {ver} listening on {bind_host} port {bind_port}\")\n web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)\n\n\ndef make_app() -> web.Application:\n app = web.Application(\n middlewares=[cors(allow_headers=(*BLACK_HEADERS, \"Content-Type\"))]\n )\n executor = ProcessPoolExecutor()\n app.add_routes([web.post(\"/\", partial(handle, executor=executor))])\n return app\n\n\nasync def handle(request: web.Request, executor: Executor) -> web.Response:\n headers = {BLACK_VERSION_HEADER: __version__}\n try:\n if request.headers.get(PROTOCOL_VERSION_HEADER, \"1\") != \"1\":\n return web.Response(\n status=501, text=\"This server only supports protocol version 1\"\n )\n try:\n line_length = int(\n request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)\n )\n except ValueError:\n return web.Response(status=400, text=\"Invalid line length header value\")\n\n if PYTHON_VARIANT_HEADER in request.headers:\n value = request.headers[PYTHON_VARIANT_HEADER]\n try:\n pyi, versions = parse_python_variant_header(value)\n except InvalidVariantHeader as e:\n return web.Response(\n status=400,\n text=f\"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}\",\n )\n else:\n pyi = False\n versions = set()\n\n skip_string_normalization = bool(\n request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)\n )\n skip_magic_trailing_comma = bool(\n request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)\n )\n preview = bool(request.headers.get(PREVIEW, False))\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n mode = black.FileMode(\n target_versions=versions,\n is_pyi=pyi,\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n magic_trailing_comma=not skip_magic_trailing_comma,\n preview=preview,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n req_str = 
req_bytes.decode(charset)\n then = datetime.utcnow()\n\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)\n )\n\n # Only output the diff in the HTTP response\n only_diff = bool(request.headers.get(DIFF_HEADER, False))\n if only_diff:\n now = datetime.utcnow()\n src_name = f\"In\\t{then} +0000\"\n dst_name = f\"Out\\t{now} +0000\"\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor,\n partial(black.diff, req_str, formatted_str, src_name, dst_name),\n )\n\n return web.Response(\n content_type=request.content_type,\n charset=charset,\n headers=headers,\n text=formatted_str,\n )\n except black.NothingChanged:\n return web.Response(status=204, headers=headers)\n except black.InvalidInput as e:\n return web.Response(status=400, headers=headers, text=str(e))\n except Exception as e:\n logging.exception(\"Exception during handling a request\")\n return web.Response(status=500, headers=headers, text=str(e))\n\n\ndef parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:\n if value == \"pyi\":\n return True, set()\n else:\n versions = set()\n for version in value.split(\",\"):\n if version.startswith(\"py\"):\n version = version[len(\"py\") :]\n if \".\" in version:\n major_str, *rest = version.split(\".\")\n else:\n major_str = version[0]\n rest = [version[1:]] if len(version) > 1 else []\n try:\n major = int(major_str)\n if major not in (2, 3):\n raise InvalidVariantHeader(\"major version must be 2 or 3\")\n if len(rest) > 0:\n minor = int(rest[0])\n if major == 2:\n raise InvalidVariantHeader(\"Python 2 is not supported\")\n else:\n # Default to lowest supported minor version.\n minor = 7 if major == 2 else 3\n version_str = f\"PY{major}{minor}\"\n if major == 3 and not hasattr(black.TargetVersion, version_str):\n raise InvalidVariantHeader(f\"3.{minor} is not supported\")\n versions.add(black.TargetVersion[version_str])\n except (KeyError, ValueError):\n raise InvalidVariantHeader(\"expected e.g. '3.7', 'py3.5'\") from None\n return False, versions\n\n\ndef patched_main() -> None:\n maybe_install_uvloop()\n freeze_support()\n black.patch_click()\n main()\n\n\nif __name__ == \"__main__\":\n patched_main()\n", "path": "src/blackd/__init__.py"}], "after_files": [{"content": "import asyncio\nimport logging\nfrom concurrent.futures import Executor, ProcessPoolExecutor\nfrom datetime import datetime\nfrom functools import partial\nfrom multiprocessing import freeze_support\nfrom typing import Set, Tuple\n\ntry:\n from aiohttp import web\n\n from .middlewares import cors\nexcept ImportError as ie:\n raise ImportError(\n f\"aiohttp dependency is not installed: {ie}. 
\"\n + \"Please re-install black with the '[d]' extra install \"\n + \"to obtain aiohttp_cors: `pip install black[d]`\"\n ) from None\n\nimport click\n\nimport black\nfrom _black_version import version as __version__\nfrom black.concurrency import maybe_install_uvloop\n\n# This is used internally by tests to shut down the server prematurely\n_stop_signal = asyncio.Event()\n\n# Request headers\nPROTOCOL_VERSION_HEADER = \"X-Protocol-Version\"\nLINE_LENGTH_HEADER = \"X-Line-Length\"\nPYTHON_VARIANT_HEADER = \"X-Python-Variant\"\nSKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\nSKIP_MAGIC_TRAILING_COMMA = \"X-Skip-Magic-Trailing-Comma\"\nPREVIEW = \"X-Preview\"\nFAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\nDIFF_HEADER = \"X-Diff\"\n\nBLACK_HEADERS = [\n PROTOCOL_VERSION_HEADER,\n LINE_LENGTH_HEADER,\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n SKIP_MAGIC_TRAILING_COMMA,\n PREVIEW,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n]\n\n# Response headers\nBLACK_VERSION_HEADER = \"X-Black-Version\"\n\n\nclass InvalidVariantHeader(Exception):\n pass\n\n\[email protected](context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\[email protected](\n \"--bind-host\", type=str, help=\"Address to bind the server to.\", default=\"localhost\"\n)\[email protected](\"--bind-port\", type=int, help=\"Port to listen on\", default=45484)\[email protected]_option(version=black.__version__)\ndef main(bind_host: str, bind_port: int) -> None:\n logging.basicConfig(level=logging.INFO)\n app = make_app()\n ver = black.__version__\n black.out(f\"blackd version {ver} listening on {bind_host} port {bind_port}\")\n web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)\n\n\ndef make_app() -> web.Application:\n app = web.Application(\n middlewares=[cors(allow_headers=(*BLACK_HEADERS, \"Content-Type\"))]\n )\n executor = ProcessPoolExecutor()\n app.add_routes([web.post(\"/\", partial(handle, executor=executor))])\n return app\n\n\nasync def handle(request: web.Request, executor: Executor) -> web.Response:\n headers = {BLACK_VERSION_HEADER: __version__}\n try:\n if request.headers.get(PROTOCOL_VERSION_HEADER, \"1\") != \"1\":\n return web.Response(\n status=501, text=\"This server only supports protocol version 1\"\n )\n try:\n line_length = int(\n request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)\n )\n except ValueError:\n return web.Response(status=400, text=\"Invalid line length header value\")\n\n if PYTHON_VARIANT_HEADER in request.headers:\n value = request.headers[PYTHON_VARIANT_HEADER]\n try:\n pyi, versions = parse_python_variant_header(value)\n except InvalidVariantHeader as e:\n return web.Response(\n status=400,\n text=f\"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}\",\n )\n else:\n pyi = False\n versions = set()\n\n skip_string_normalization = bool(\n request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)\n )\n skip_magic_trailing_comma = bool(\n request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)\n )\n preview = bool(request.headers.get(PREVIEW, False))\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n mode = black.FileMode(\n target_versions=versions,\n is_pyi=pyi,\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n magic_trailing_comma=not skip_magic_trailing_comma,\n preview=preview,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n req_str = 
req_bytes.decode(charset)\n then = datetime.utcnow()\n\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)\n )\n\n # Preserve CRLF line endings\n if req_str[req_str.find(\"\\n\") - 1] == \"\\r\":\n formatted_str = formatted_str.replace(\"\\n\", \"\\r\\n\")\n # If, after swapping line endings, nothing changed, then say so\n if formatted_str == req_str:\n raise black.NothingChanged\n\n # Only output the diff in the HTTP response\n only_diff = bool(request.headers.get(DIFF_HEADER, False))\n if only_diff:\n now = datetime.utcnow()\n src_name = f\"In\\t{then} +0000\"\n dst_name = f\"Out\\t{now} +0000\"\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor,\n partial(black.diff, req_str, formatted_str, src_name, dst_name),\n )\n\n return web.Response(\n content_type=request.content_type,\n charset=charset,\n headers=headers,\n text=formatted_str,\n )\n except black.NothingChanged:\n return web.Response(status=204, headers=headers)\n except black.InvalidInput as e:\n return web.Response(status=400, headers=headers, text=str(e))\n except Exception as e:\n logging.exception(\"Exception during handling a request\")\n return web.Response(status=500, headers=headers, text=str(e))\n\n\ndef parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:\n if value == \"pyi\":\n return True, set()\n else:\n versions = set()\n for version in value.split(\",\"):\n if version.startswith(\"py\"):\n version = version[len(\"py\") :]\n if \".\" in version:\n major_str, *rest = version.split(\".\")\n else:\n major_str = version[0]\n rest = [version[1:]] if len(version) > 1 else []\n try:\n major = int(major_str)\n if major not in (2, 3):\n raise InvalidVariantHeader(\"major version must be 2 or 3\")\n if len(rest) > 0:\n minor = int(rest[0])\n if major == 2:\n raise InvalidVariantHeader(\"Python 2 is not supported\")\n else:\n # Default to lowest supported minor version.\n minor = 7 if major == 2 else 3\n version_str = f\"PY{major}{minor}\"\n if major == 3 and not hasattr(black.TargetVersion, version_str):\n raise InvalidVariantHeader(f\"3.{minor} is not supported\")\n versions.add(black.TargetVersion[version_str])\n except (KeyError, ValueError):\n raise InvalidVariantHeader(\"expected e.g. '3.7', 'py3.5'\") from None\n return False, versions\n\n\ndef patched_main() -> None:\n maybe_install_uvloop()\n freeze_support()\n black.patch_click()\n main()\n\n\nif __name__ == \"__main__\":\n patched_main()\n", "path": "src/blackd/__init__.py"}]} | 2,776 | 188 |
gh_patches_debug_14169 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-91 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `connections` to `httplib2.AuthorizedHttp`
/cc @dhermes
--- END ISSUE ---
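For orientation, the request amounts to exposing the wrapped client's `connections` attribute on the wrapper class. Below is a minimal sketch of such a pass-through property; it is illustrative only, not the project's actual implementation. The class name `ConnectionsProxyExample` is invented for the sketch, while `self.http` mirrors how the `AuthorizedHttp` class in the files below stores its wrapped client, and `httplib2.Http.connections` is the real connection-pool attribute being proxied.

```python
import httplib2


class ConnectionsProxyExample:
    """Illustrative wrapper that proxies `connections` to the wrapped httplib2.Http."""

    def __init__(self, http=None):
        # The wrapped client; httplib2.Http keeps its connection pool in `.connections`.
        self.http = http or httplib2.Http()

    @property
    def connections(self):
        # Read through to the underlying connection pool.
        return self.http.connections

    @connections.setter
    def connections(self, value):
        # Allow callers to swap the pool, mirroring the httplib2.Http attribute.
        self.http.connections = value
```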
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httplib2_transport/setup.py`
Content:
```
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup
16
17
18 DEPENDENCIES = (
19 'pyasn1>=0.1.7',
20 'pyasn1-modules>=0.0.5',
21 'rsa>=3.1.4',
22 'six>=1.9.0',
23 'google-auth'
24 )
25
26
27 with open('README.rst', 'r') as fh:
28 long_description = fh.read()
29
30
31 setup(
32 name='google-auth-httplib2',
33 version='0.0.1',
34 author='Google Cloud Platform',
35 author_email='[email protected]',
36 description='Google Authentication Library',
37 long_description=long_description,
38 url='https://github.com/GoogleCloudPlatform/google-auth-library-python',
39 py_modules=['google_auth_httplib2'],
40 install_requires=DEPENDENCIES,
41 license='Apache 2.0',
42 keywords='google auth oauth client',
43 classifiers=(
44 'Programming Language :: Python :: 2',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.4',
48 'Programming Language :: Python :: 3.5',
49 'Development Status :: 3 - Alpha',
50 'Intended Audience :: Developers',
51 'License :: OSI Approved :: Apache Software License',
52 'Operating System :: POSIX',
53 'Operating System :: Microsoft :: Windows',
54 'Operating System :: MacOS :: MacOS X',
55 'Operating System :: OS Independent',
56 'Topic :: Internet :: WWW/HTTP',
57 ),
58 )
59
```
Path: `httplib2_transport/google_auth_httplib2.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Transport adapter for httplib2."""
16
17 from __future__ import absolute_import
18
19 import logging
20
21 from google.auth import exceptions
22 from google.auth import transport
23 import httplib2
24
25
26 _LOGGER = logging.getLogger(__name__)
27 # Properties present in file-like streams / buffers.
28 _STREAM_PROPERTIES = ('read', 'seek', 'tell')
29
30
31 class _Response(transport.Response):
32 """httplib2 transport response adapter.
33
34 Args:
35 response (httplib2.Response): The raw httplib2 response.
36 data (bytes): The response body.
37 """
38 def __init__(self, response, data):
39 self._response = response
40 self._data = data
41
42 @property
43 def status(self):
44 """int: The HTTP status code."""
45 return self._response.status
46
47 @property
48 def headers(self):
49 """Mapping[str, str]: The HTTP response headers."""
50 return dict(self._response)
51
52 @property
53 def data(self):
54 """bytes: The response body."""
55 return self._data
56
57
58 class Request(transport.Request):
59 """httplib2 request adapter.
60
61 This class is used internally for making requests using various transports
62 in a consistent way. If you use :class:`AuthorizedHttp` you do not need
63 to construct or use this class directly.
64
65 This class can be useful if you want to manually refresh a
66 :class:`~google.auth.credentials.Credentials` instance::
67
68 import google.auth.transport.httplib2
69 import httplib2
70
71 http = httplib2.Http()
72 request = google.auth.transport.httplib2.Request(http)
73
74 credentials.refresh(request)
75
76 Args:
77 http (httplib2.Http): The underlying http object to use to make
78 requests.
79
80 .. automethod:: __call__
81 """
82 def __init__(self, http):
83 self.http = http
84
85 def __call__(self, url, method='GET', body=None, headers=None,
86 timeout=None, **kwargs):
87 """Make an HTTP request using httplib2.
88
89 Args:
90 url (str): The URI to be requested.
91 method (str): The HTTP method to use for the request. Defaults
92 to 'GET'.
93 body (bytes): The payload / body in HTTP request.
94 headers (Mapping[str, str]): Request headers.
95 timeout (Optional[int]): The number of seconds to wait for a
96 response from the server. This is ignored by httplib2 and will
97 issue a warning.
98             kwargs: Additional arguments passed through to the underlying
99 :meth:`httplib2.Http.request` method.
100
101 Returns:
102 google.auth.transport.Response: The HTTP response.
103
104 Raises:
105 google.auth.exceptions.TransportError: If any exception occurred.
106 """
107 if timeout is not None:
108 _LOGGER.warning(
109 'httplib2 transport does not support per-request timeout. '
110 'Set the timeout when constructing the httplib2.Http instance.'
111 )
112
113 try:
114 _LOGGER.debug('Making request: %s %s', method, url)
115 response, data = self.http.request(
116 url, method=method, body=body, headers=headers, **kwargs)
117 return _Response(response, data)
118 except httplib2.HttpLib2Error as exc:
119 raise exceptions.TransportError(exc)
120
121
122 def _make_default_http():
123 """Returns a default httplib2.Http instance."""
124 return httplib2.Http()
125
126
127 class AuthorizedHttp(object):
128 """A httplib2 HTTP class with credentials.
129
130 This class is used to perform requests to API endpoints that require
131 authorization::
132
133 from google.auth.transport._httplib2 import AuthorizedHttp
134
135 authed_http = AuthorizedHttp(credentials)
136
137 response = authed_http.request(
138 'https://www.googleapis.com/storage/v1/b')
139
140 This class implements :meth:`request` in the same way as
141 :class:`httplib2.Http` and can usually be used just like any other
142     instance of :class:`httplib2.Http`.
143
144 The underlying :meth:`request` implementation handles adding the
145 credentials' headers to the request and refreshing credentials as needed.
146 """
147 def __init__(self, credentials, http=None,
148 refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
149 max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS):
150 """
151 Args:
152 credentials (google.auth.credentials.Credentials): The credentials
153 to add to the request.
154 http (httplib2.Http): The underlying HTTP object to
155 use to make requests. If not specified, a
156 :class:`httplib2.Http` instance will be constructed.
157 refresh_status_codes (Sequence[int]): Which HTTP status codes
158 indicate that credentials should be refreshed and the request
159 should be retried.
160 max_refresh_attempts (int): The maximum number of times to attempt
161 to refresh the credentials and retry the request.
162 """
163
164 if http is None:
165 http = _make_default_http()
166
167 self.http = http
168 self.credentials = credentials
169 self._refresh_status_codes = refresh_status_codes
170 self._max_refresh_attempts = max_refresh_attempts
171 # Request instance used by internal methods (for example,
172 # credentials.refresh).
173 self._request = Request(self.http)
174
175 def request(self, uri, method='GET', body=None, headers=None,
176 **kwargs):
177 """Implementation of httplib2's Http.request."""
178
179 _credential_refresh_attempt = kwargs.pop(
180 '_credential_refresh_attempt', 0)
181
182 # Make a copy of the headers. They will be modified by the credentials
183 # and we want to pass the original headers if we recurse.
184 request_headers = headers.copy() if headers is not None else {}
185
186 self.credentials.before_request(
187 self._request, method, uri, request_headers)
188
189 # Check if the body is a file-like stream, and if so, save the body
190 # stream position so that it can be restored in case of refresh.
191 body_stream_position = None
192 if all(getattr(body, stream_prop, None) for stream_prop in
193 _STREAM_PROPERTIES):
194 body_stream_position = body.tell()
195
196 # Make the request.
197 response, content = self.http.request(
198 uri, method, body=body, headers=request_headers, **kwargs)
199
200 # If the response indicated that the credentials needed to be
201 # refreshed, then refresh the credentials and re-attempt the
202 # request.
203 # A stored token may expire between the time it is retrieved and
204 # the time the request is made, so we may need to try twice.
205 if (response.status in self._refresh_status_codes
206 and _credential_refresh_attempt < self._max_refresh_attempts):
207
208 _LOGGER.info(
209 'Refreshing credentials due to a %s response. Attempt %s/%s.',
210 response.status, _credential_refresh_attempt + 1,
211 self._max_refresh_attempts)
212
213 self.credentials.refresh(self._request)
214
215 # Restore the body's stream position if needed.
216 if body_stream_position is not None:
217 body.seek(body_stream_position)
218
219 # Recurse. Pass in the original headers, not our modified set.
220 return self.request(
221 uri, method, body=body, headers=headers,
222 _credential_refresh_attempt=_credential_refresh_attempt + 1,
223 **kwargs)
224
225 return response, content
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httplib2_transport/google_auth_httplib2.py b/httplib2_transport/google_auth_httplib2.py
--- a/httplib2_transport/google_auth_httplib2.py
+++ b/httplib2_transport/google_auth_httplib2.py
@@ -223,3 +223,13 @@
**kwargs)
return response, content
+
+ @property
+ def connections(self):
+ """Proxy to httplib2.Http.connections."""
+ return self.http.connections
+
+ @connections.setter
+ def connections(self, value):
+ """Proxy to httplib2.Http.connections."""
+ self.http.connections = value
diff --git a/httplib2_transport/setup.py b/httplib2_transport/setup.py
--- a/httplib2_transport/setup.py
+++ b/httplib2_transport/setup.py
@@ -30,7 +30,7 @@
setup(
name='google-auth-httplib2',
- version='0.0.1',
+ version='0.0.2',
author='Google Cloud Platform',
author_email='[email protected]',
description='Google Authentication Library',
| {"golden_diff": "diff --git a/httplib2_transport/google_auth_httplib2.py b/httplib2_transport/google_auth_httplib2.py\n--- a/httplib2_transport/google_auth_httplib2.py\n+++ b/httplib2_transport/google_auth_httplib2.py\n@@ -223,3 +223,13 @@\n **kwargs)\n \n return response, content\n+\n+ @property\n+ def connections(self):\n+ \"\"\"Proxy to httplib2.Http.connections.\"\"\"\n+ return self.http.connections\n+\n+ @connections.setter\n+ def connections(self, value):\n+ \"\"\"Proxy to httplib2.Http.connections.\"\"\"\n+ self.http.connections = value\ndiff --git a/httplib2_transport/setup.py b/httplib2_transport/setup.py\n--- a/httplib2_transport/setup.py\n+++ b/httplib2_transport/setup.py\n@@ -30,7 +30,7 @@\n \n setup(\n name='google-auth-httplib2',\n- version='0.0.1',\n+ version='0.0.2',\n author='Google Cloud Platform',\n author_email='[email protected]',\n description='Google Authentication Library',\n", "issue": "Add `connections` to `httplib2.AuthorizedHttp`\n/cc @dhermes\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n 'pyasn1>=0.1.7',\n 'pyasn1-modules>=0.0.5',\n 'rsa>=3.1.4',\n 'six>=1.9.0',\n 'google-auth'\n)\n\n\nwith open('README.rst', 'r') as fh:\n long_description = fh.read()\n\n\nsetup(\n name='google-auth-httplib2',\n version='0.0.1',\n author='Google Cloud Platform',\n author_email='[email protected]',\n description='Google Authentication Library',\n long_description=long_description,\n url='https://github.com/GoogleCloudPlatform/google-auth-library-python',\n py_modules=['google_auth_httplib2'],\n install_requires=DEPENDENCIES,\n license='Apache 2.0',\n keywords='google auth oauth client',\n classifiers=(\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ),\n)\n", "path": "httplib2_transport/setup.py"}, {"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport adapter for httplib2.\"\"\"\n\nfrom __future__ import 
absolute_import\n\nimport logging\n\nfrom google.auth import exceptions\nfrom google.auth import transport\nimport httplib2\n\n\n_LOGGER = logging.getLogger(__name__)\n# Properties present in file-like streams / buffers.\n_STREAM_PROPERTIES = ('read', 'seek', 'tell')\n\n\nclass _Response(transport.Response):\n \"\"\"httplib2 transport response adapter.\n\n Args:\n response (httplib2.Response): The raw httplib2 response.\n data (bytes): The response body.\n \"\"\"\n def __init__(self, response, data):\n self._response = response\n self._data = data\n\n @property\n def status(self):\n \"\"\"int: The HTTP status code.\"\"\"\n return self._response.status\n\n @property\n def headers(self):\n \"\"\"Mapping[str, str]: The HTTP response headers.\"\"\"\n return dict(self._response)\n\n @property\n def data(self):\n \"\"\"bytes: The response body.\"\"\"\n return self._data\n\n\nclass Request(transport.Request):\n \"\"\"httplib2 request adapter.\n\n This class is used internally for making requests using various transports\n in a consistent way. If you use :class:`AuthorizedHttp` you do not need\n to construct or use this class directly.\n\n This class can be useful if you want to manually refresh a\n :class:`~google.auth.credentials.Credentials` instance::\n\n import google.auth.transport.httplib2\n import httplib2\n\n http = httplib2.Http()\n request = google.auth.transport.httplib2.Request(http)\n\n credentials.refresh(request)\n\n Args:\n http (httplib2.Http): The underlying http object to use to make\n requests.\n\n .. automethod:: __call__\n \"\"\"\n def __init__(self, http):\n self.http = http\n\n def __call__(self, url, method='GET', body=None, headers=None,\n timeout=None, **kwargs):\n \"\"\"Make an HTTP request using httplib2.\n\n Args:\n url (str): The URI to be requested.\n method (str): The HTTP method to use for the request. Defaults\n to 'GET'.\n body (bytes): The payload / body in HTTP request.\n headers (Mapping[str, str]): Request headers.\n timeout (Optional[int]): The number of seconds to wait for a\n response from the server. This is ignored by httplib2 and will\n issue a warning.\n kwargs: Additional arguments passed throught to the underlying\n :meth:`httplib2.Http.request` method.\n\n Returns:\n google.auth.transport.Response: The HTTP response.\n\n Raises:\n google.auth.exceptions.TransportError: If any exception occurred.\n \"\"\"\n if timeout is not None:\n _LOGGER.warning(\n 'httplib2 transport does not support per-request timeout. 
'\n 'Set the timeout when constructing the httplib2.Http instance.'\n )\n\n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n response, data = self.http.request(\n url, method=method, body=body, headers=headers, **kwargs)\n return _Response(response, data)\n except httplib2.HttpLib2Error as exc:\n raise exceptions.TransportError(exc)\n\n\ndef _make_default_http():\n \"\"\"Returns a default httplib2.Http instance.\"\"\"\n return httplib2.Http()\n\n\nclass AuthorizedHttp(object):\n \"\"\"A httplib2 HTTP class with credentials.\n\n This class is used to perform requests to API endpoints that require\n authorization::\n\n from google.auth.transport._httplib2 import AuthorizedHttp\n\n authed_http = AuthorizedHttp(credentials)\n\n response = authed_http.request(\n 'https://www.googleapis.com/storage/v1/b')\n\n This class implements :meth:`request` in the same way as\n :class:`httplib2.Http` and can usually be used just like any other\n instance of :class:``httplib2.Http`.\n\n The underlying :meth:`request` implementation handles adding the\n credentials' headers to the request and refreshing credentials as needed.\n \"\"\"\n def __init__(self, credentials, http=None,\n refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,\n max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS):\n \"\"\"\n Args:\n credentials (google.auth.credentials.Credentials): The credentials\n to add to the request.\n http (httplib2.Http): The underlying HTTP object to\n use to make requests. If not specified, a\n :class:`httplib2.Http` instance will be constructed.\n refresh_status_codes (Sequence[int]): Which HTTP status codes\n indicate that credentials should be refreshed and the request\n should be retried.\n max_refresh_attempts (int): The maximum number of times to attempt\n to refresh the credentials and retry the request.\n \"\"\"\n\n if http is None:\n http = _make_default_http()\n\n self.http = http\n self.credentials = credentials\n self._refresh_status_codes = refresh_status_codes\n self._max_refresh_attempts = max_refresh_attempts\n # Request instance used by internal methods (for example,\n # credentials.refresh).\n self._request = Request(self.http)\n\n def request(self, uri, method='GET', body=None, headers=None,\n **kwargs):\n \"\"\"Implementation of httplib2's Http.request.\"\"\"\n\n _credential_refresh_attempt = kwargs.pop(\n '_credential_refresh_attempt', 0)\n\n # Make a copy of the headers. 
They will be modified by the credentials\n # and we want to pass the original headers if we recurse.\n request_headers = headers.copy() if headers is not None else {}\n\n self.credentials.before_request(\n self._request, method, uri, request_headers)\n\n # Check if the body is a file-like stream, and if so, save the body\n # stream position so that it can be restored in case of refresh.\n body_stream_position = None\n if all(getattr(body, stream_prop, None) for stream_prop in\n _STREAM_PROPERTIES):\n body_stream_position = body.tell()\n\n # Make the request.\n response, content = self.http.request(\n uri, method, body=body, headers=request_headers, **kwargs)\n\n # If the response indicated that the credentials needed to be\n # refreshed, then refresh the credentials and re-attempt the\n # request.\n # A stored token may expire between the time it is retrieved and\n # the time the request is made, so we may need to try twice.\n if (response.status in self._refresh_status_codes\n and _credential_refresh_attempt < self._max_refresh_attempts):\n\n _LOGGER.info(\n 'Refreshing credentials due to a %s response. Attempt %s/%s.',\n response.status, _credential_refresh_attempt + 1,\n self._max_refresh_attempts)\n\n self.credentials.refresh(self._request)\n\n # Restore the body's stream position if needed.\n if body_stream_position is not None:\n body.seek(body_stream_position)\n\n # Recurse. Pass in the original headers, not our modified set.\n return self.request(\n uri, method, body=body, headers=headers,\n _credential_refresh_attempt=_credential_refresh_attempt + 1,\n **kwargs)\n\n return response, content\n", "path": "httplib2_transport/google_auth_httplib2.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n 'pyasn1>=0.1.7',\n 'pyasn1-modules>=0.0.5',\n 'rsa>=3.1.4',\n 'six>=1.9.0',\n 'google-auth'\n)\n\n\nwith open('README.rst', 'r') as fh:\n long_description = fh.read()\n\n\nsetup(\n name='google-auth-httplib2',\n version='0.0.2',\n author='Google Cloud Platform',\n author_email='[email protected]',\n description='Google Authentication Library',\n long_description=long_description,\n url='https://github.com/GoogleCloudPlatform/google-auth-library-python',\n py_modules=['google_auth_httplib2'],\n install_requires=DEPENDENCIES,\n license='Apache 2.0',\n keywords='google auth oauth client',\n classifiers=(\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ),\n)\n", "path": "httplib2_transport/setup.py"}, 
{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport adapter for httplib2.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom google.auth import exceptions\nfrom google.auth import transport\nimport httplib2\n\n\n_LOGGER = logging.getLogger(__name__)\n# Properties present in file-like streams / buffers.\n_STREAM_PROPERTIES = ('read', 'seek', 'tell')\n\n\nclass _Response(transport.Response):\n \"\"\"httplib2 transport response adapter.\n\n Args:\n response (httplib2.Response): The raw httplib2 response.\n data (bytes): The response body.\n \"\"\"\n def __init__(self, response, data):\n self._response = response\n self._data = data\n\n @property\n def status(self):\n \"\"\"int: The HTTP status code.\"\"\"\n return self._response.status\n\n @property\n def headers(self):\n \"\"\"Mapping[str, str]: The HTTP response headers.\"\"\"\n return dict(self._response)\n\n @property\n def data(self):\n \"\"\"bytes: The response body.\"\"\"\n return self._data\n\n\nclass Request(transport.Request):\n \"\"\"httplib2 request adapter.\n\n This class is used internally for making requests using various transports\n in a consistent way. If you use :class:`AuthorizedHttp` you do not need\n to construct or use this class directly.\n\n This class can be useful if you want to manually refresh a\n :class:`~google.auth.credentials.Credentials` instance::\n\n import google.auth.transport.httplib2\n import httplib2\n\n http = httplib2.Http()\n request = google.auth.transport.httplib2.Request(http)\n\n credentials.refresh(request)\n\n Args:\n http (httplib2.Http): The underlying http object to use to make\n requests.\n\n .. automethod:: __call__\n \"\"\"\n def __init__(self, http):\n self.http = http\n\n def __call__(self, url, method='GET', body=None, headers=None,\n timeout=None, **kwargs):\n \"\"\"Make an HTTP request using httplib2.\n\n Args:\n url (str): The URI to be requested.\n method (str): The HTTP method to use for the request. Defaults\n to 'GET'.\n body (bytes): The payload / body in HTTP request.\n headers (Mapping[str, str]): Request headers.\n timeout (Optional[int]): The number of seconds to wait for a\n response from the server. This is ignored by httplib2 and will\n issue a warning.\n kwargs: Additional arguments passed throught to the underlying\n :meth:`httplib2.Http.request` method.\n\n Returns:\n google.auth.transport.Response: The HTTP response.\n\n Raises:\n google.auth.exceptions.TransportError: If any exception occurred.\n \"\"\"\n if timeout is not None:\n _LOGGER.warning(\n 'httplib2 transport does not support per-request timeout. 
'\n 'Set the timeout when constructing the httplib2.Http instance.'\n )\n\n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n response, data = self.http.request(\n url, method=method, body=body, headers=headers, **kwargs)\n return _Response(response, data)\n except httplib2.HttpLib2Error as exc:\n raise exceptions.TransportError(exc)\n\n\ndef _make_default_http():\n \"\"\"Returns a default httplib2.Http instance.\"\"\"\n return httplib2.Http()\n\n\nclass AuthorizedHttp(object):\n \"\"\"A httplib2 HTTP class with credentials.\n\n This class is used to perform requests to API endpoints that require\n authorization::\n\n from google.auth.transport._httplib2 import AuthorizedHttp\n\n authed_http = AuthorizedHttp(credentials)\n\n response = authed_http.request(\n 'https://www.googleapis.com/storage/v1/b')\n\n This class implements :meth:`request` in the same way as\n :class:`httplib2.Http` and can usually be used just like any other\n instance of :class:``httplib2.Http`.\n\n The underlying :meth:`request` implementation handles adding the\n credentials' headers to the request and refreshing credentials as needed.\n \"\"\"\n def __init__(self, credentials, http=None,\n refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,\n max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS):\n \"\"\"\n Args:\n credentials (google.auth.credentials.Credentials): The credentials\n to add to the request.\n http (httplib2.Http): The underlying HTTP object to\n use to make requests. If not specified, a\n :class:`httplib2.Http` instance will be constructed.\n refresh_status_codes (Sequence[int]): Which HTTP status codes\n indicate that credentials should be refreshed and the request\n should be retried.\n max_refresh_attempts (int): The maximum number of times to attempt\n to refresh the credentials and retry the request.\n \"\"\"\n\n if http is None:\n http = _make_default_http()\n\n self.http = http\n self.credentials = credentials\n self._refresh_status_codes = refresh_status_codes\n self._max_refresh_attempts = max_refresh_attempts\n # Request instance used by internal methods (for example,\n # credentials.refresh).\n self._request = Request(self.http)\n\n def request(self, uri, method='GET', body=None, headers=None,\n **kwargs):\n \"\"\"Implementation of httplib2's Http.request.\"\"\"\n\n _credential_refresh_attempt = kwargs.pop(\n '_credential_refresh_attempt', 0)\n\n # Make a copy of the headers. 
They will be modified by the credentials\n # and we want to pass the original headers if we recurse.\n request_headers = headers.copy() if headers is not None else {}\n\n self.credentials.before_request(\n self._request, method, uri, request_headers)\n\n # Check if the body is a file-like stream, and if so, save the body\n # stream position so that it can be restored in case of refresh.\n body_stream_position = None\n if all(getattr(body, stream_prop, None) for stream_prop in\n _STREAM_PROPERTIES):\n body_stream_position = body.tell()\n\n # Make the request.\n response, content = self.http.request(\n uri, method, body=body, headers=request_headers, **kwargs)\n\n # If the response indicated that the credentials needed to be\n # refreshed, then refresh the credentials and re-attempt the\n # request.\n # A stored token may expire between the time it is retrieved and\n # the time the request is made, so we may need to try twice.\n if (response.status in self._refresh_status_codes\n and _credential_refresh_attempt < self._max_refresh_attempts):\n\n _LOGGER.info(\n 'Refreshing credentials due to a %s response. Attempt %s/%s.',\n response.status, _credential_refresh_attempt + 1,\n self._max_refresh_attempts)\n\n self.credentials.refresh(self._request)\n\n # Restore the body's stream position if needed.\n if body_stream_position is not None:\n body.seek(body_stream_position)\n\n # Recurse. Pass in the original headers, not our modified set.\n return self.request(\n uri, method, body=body, headers=headers,\n _credential_refresh_attempt=_credential_refresh_attempt + 1,\n **kwargs)\n\n return response, content\n\n @property\n def connections(self):\n \"\"\"Proxy to httplib2.Http.connections.\"\"\"\n return self.http.connections\n\n @connections.setter\n def connections(self, value):\n \"\"\"Proxy to httplib2.Http.connections.\"\"\"\n self.http.connections = value\n", "path": "httplib2_transport/google_auth_httplib2.py"}]} | 3,141 | 258 |
gh_patches_debug_30664 | rasdani/github-patches | git_diff | getsentry__sentry-61972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Convert issue notifications to use block kit
Convert the `IssueNotificationMessageBuilder` to use block kit (code [here](https://github.com/getsentry/sentry/blob/master/src/sentry/integrations/slack/message_builder/notifications/issues.py)) - note that these are the notifications an individual user receives if an issue alert is set to notify a specific user, and they differ from the notifications that go to a channel in that they do not have any actions (buttons).
--- END ISSUE ---
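For background, "block kit" refers to Slack's newer message layout format: instead of returning legacy attachment fields, a converted builder returns a payload whose `blocks` list carries the rendered content. The sketch below is illustrative only — the titles, footers, and other string values are placeholders, the block types (`section`, `context`, `mrkdwn`) come from Slack's public API, and the real shape is decided by the builder classes in the files that follow.

```python
# Legacy attachment shape (roughly what an un-converted builder returns):
legacy_attachment = {
    "title": "Example issue title",          # placeholder
    "text": "example.culprit in do_thing",   # placeholder
    "footer": "my-project | my-alert-rule",  # placeholder
    "color": "#E03E2F",
}

# Block Kit shape (roughly what a converted builder returns): a dict carrying a
# "blocks" list of section/context blocks, plus plain fallback text.
block_kit_payload = {
    "blocks": [
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*Example issue title*\nexample.culprit in do_thing",
            },
        },
        {
            "type": "context",
            "elements": [{"type": "mrkdwn", "text": "my-project | my-alert-rule"}],
        },
    ],
    "text": "Example issue title",  # preview/fallback text shown in notifications
}
```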
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/integrations/slack/message_builder/notifications/issues.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any, Mapping
4
5 from sentry.integrations.slack.message_builder import SlackBody
6 from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder
7 from sentry.notifications.notifications.base import ProjectNotification
8 from sentry.services.hybrid_cloud.actor import RpcActor
9
10 from .base import SlackNotificationsMessageBuilder
11
12
13 class IssueNotificationMessageBuilder(SlackNotificationsMessageBuilder):
14 def __init__(
15 self,
16 notification: ProjectNotification,
17 context: Mapping[str, Any],
18 recipient: RpcActor,
19 ) -> None:
20 super().__init__(notification, context, recipient)
21 self.notification: ProjectNotification = notification
22
23 def build(self) -> SlackBody:
24 group = getattr(self.notification, "group", None)
25 return SlackIssuesMessageBuilder(
26 group=group,
27 event=getattr(self.notification, "event", None),
28 tags=self.context.get("tags", None),
29 rules=getattr(self.notification, "rules", None),
30 issue_details=True,
31 notification=self.notification,
32 recipient=self.recipient,
33 ).build()
34
```
Path: `src/sentry/integrations/slack/notifications.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 from copy import copy
5 from typing import Any, Iterable, List, Mapping
6
7 import sentry_sdk
8
9 from sentry import features
10 from sentry.integrations.mixins import NotifyBasicMixin
11 from sentry.integrations.notifications import get_context, get_integrations_by_channel_by_recipient
12 from sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock
13 from sentry.integrations.slack.message_builder.base.block import BlockSlackMessageBuilder
14 from sentry.integrations.slack.message_builder.notifications import get_message_builder
15 from sentry.models.integrations.integration import Integration
16 from sentry.notifications.additional_attachment_manager import get_additional_attachment
17 from sentry.notifications.notifications.base import BaseNotification
18 from sentry.notifications.notify import register_notification_provider
19 from sentry.services.hybrid_cloud.actor import RpcActor
20 from sentry.shared_integrations.exceptions import ApiError
21 from sentry.tasks.integrations.slack import post_message
22 from sentry.types.integrations import ExternalProviders
23 from sentry.utils import json, metrics
24
25 logger = logging.getLogger("sentry.notifications")
26 SLACK_TIMEOUT = 5
27
28
29 class SlackNotifyBasicMixin(NotifyBasicMixin):
30 def send_message(self, channel_id: str, message: str) -> None:
31 payload = {"channel": channel_id, "text": message}
32 try:
33 self.get_client().post("/chat.postMessage", data=payload, json=True)
34 except ApiError as e:
35 message = str(e)
36 if message not in ["Expired url", "channel_not_found"]:
37 logger.exception(
38 "slack.slash-notify.response-error",
39 extra={"error": message},
40 )
41
42
43 def _get_attachments(
44 notification: BaseNotification,
45 recipient: RpcActor,
46 shared_context: Mapping[str, Any],
47 extra_context_by_actor: Mapping[RpcActor, Mapping[str, Any]] | None,
48 ) -> List[SlackAttachment] | SlackBlock:
49 extra_context = (
50 extra_context_by_actor[recipient] if extra_context_by_actor and recipient else {}
51 )
52 context = get_context(notification, recipient, shared_context, extra_context)
53 cls = get_message_builder(notification.message_builder)
54 attachments = cls(notification, context, recipient).build()
55 if isinstance(attachments, List) or features.has(
56 "organizations:slack-block-kit", notification.organization
57 ):
58 return attachments
59 return [attachments]
60
61
62 def _notify_recipient(
63 notification: BaseNotification,
64 recipient: RpcActor,
65 attachments: List[SlackAttachment],
66 channel: str,
67 integration: Integration,
68 shared_context: Mapping[str, Any],
69 ) -> None:
70 with sentry_sdk.start_span(op="notification.send_slack", description="notify_recipient"):
71 # Make a local copy to which we can append.
72 local_attachments = copy(attachments)
73
74 text = notification.get_notification_title(ExternalProviders.SLACK, shared_context)
75
76 if features.has("organizations:slack-block-kit", notification.organization):
77 blocks = []
78 if text:
79 # NOTE(isabella): with legacy attachments, the notification title was
80 # automatically rendered based on the `text` field in the payload; in the block
81 # system, that payload field is used strictly as preview/fallback text, so we need
82 # to add this block to render the title in the notification ie. "Issue marked as
83 # regression", "New comment by <user>"
84 blocks.append(BlockSlackMessageBuilder.get_markdown_block(text))
85 attachment_blocks = local_attachments.get("blocks")
86 if attachment_blocks:
87 for attachment in attachment_blocks:
88 blocks.append(attachment)
89 additional_attachment = get_additional_attachment(
90 integration, notification.organization
91 )
92 if additional_attachment:
93 for block in additional_attachment:
94 blocks.append(block)
95 if (
96 not text
97 ): # if there isn't a notification title, try using message description as fallback
98 text = notification.get_message_description(recipient, ExternalProviders.SLACK)
99 payload = {
100 "channel": channel,
101 "unfurl_links": False,
102 "unfurl_media": False,
103 "text": text if text else "",
104 "blocks": json.dumps(blocks),
105 }
106 callback_id = local_attachments.get("callback_id")
107 if callback_id:
108 # callback_id is now at the same level as blocks, rather than within attachments
109 if isinstance(callback_id, str):
110 payload["callback_id"] = callback_id
111 else:
112 payload["callback_id"] = json.dumps(local_attachments.get("callback_id"))
113 else:
114 # Add optional billing related attachment.
115 additional_attachment = get_additional_attachment(
116 integration, notification.organization
117 )
118 if additional_attachment:
119 local_attachments.append(additional_attachment)
120
121 # unfurl_links and unfurl_media are needed to preserve the intended message format
122 # and prevent the app from replying with help text to the unfurl
123 payload = {
124 "channel": channel,
125 "link_names": 1,
126 "unfurl_links": False,
127 "unfurl_media": False,
128 "text": text,
129 "attachments": json.dumps(local_attachments),
130 }
131
132 log_params = {
133 "notification": notification,
134 "recipient": recipient.id,
135 "channel_id": channel,
136 }
137 post_message.apply_async(
138 kwargs={
139 "integration_id": integration.id,
140 "payload": payload,
141 "log_error_message": "notification.fail.slack_post",
142 "log_params": log_params,
143 }
144 )
145 # recording data outside of span
146 notification.record_notification_sent(recipient, ExternalProviders.SLACK)
147
148
149 @register_notification_provider(ExternalProviders.SLACK)
150 def send_notification_as_slack(
151 notification: BaseNotification,
152 recipients: Iterable[RpcActor],
153 shared_context: Mapping[str, Any],
154 extra_context_by_actor: Mapping[RpcActor, Mapping[str, Any]] | None,
155 ) -> None:
156 """Send an "activity" or "alert rule" notification to a Slack user or team, but NOT to a channel directly.
157 Sending Slack notifications to a channel is in integrations/slack/actions/notification.py"""
158 with sentry_sdk.start_span(
159 op="notification.send_slack", description="gen_channel_integration_map"
160 ):
161 data = get_integrations_by_channel_by_recipient(
162 notification.organization, recipients, ExternalProviders.SLACK
163 )
164
165 for recipient, integrations_by_channel in data.items():
166 with sentry_sdk.start_span(op="notification.send_slack", description="send_one"):
167 with sentry_sdk.start_span(op="notification.send_slack", description="gen_attachments"):
168 attachments = _get_attachments(
169 notification,
170 recipient,
171 shared_context,
172 extra_context_by_actor,
173 )
174
175 for channel, integration in integrations_by_channel.items():
176 _notify_recipient(
177 notification=notification,
178 recipient=recipient,
179 attachments=attachments,
180 channel=channel,
181 integration=integration,
182 shared_context=shared_context,
183 )
184
185 metrics.incr(
186 f"{notification.metrics_key}.notifications.sent",
187 instance=f"slack.{notification.metrics_key}.notification",
188 skip_internal=False,
189 )
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/integrations/slack/message_builder/notifications/issues.py b/src/sentry/integrations/slack/message_builder/notifications/issues.py
--- a/src/sentry/integrations/slack/message_builder/notifications/issues.py
+++ b/src/sentry/integrations/slack/message_builder/notifications/issues.py
@@ -2,7 +2,7 @@
from typing import Any, Mapping
-from sentry.integrations.slack.message_builder import SlackBody
+from sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock
from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder
from sentry.notifications.notifications.base import ProjectNotification
from sentry.services.hybrid_cloud.actor import RpcActor
@@ -20,7 +20,7 @@
super().__init__(notification, context, recipient)
self.notification: ProjectNotification = notification
- def build(self) -> SlackBody:
+ def build(self) -> SlackAttachment | SlackBlock:
group = getattr(self.notification, "group", None)
return SlackIssuesMessageBuilder(
group=group,
diff --git a/src/sentry/integrations/slack/notifications.py b/src/sentry/integrations/slack/notifications.py
--- a/src/sentry/integrations/slack/notifications.py
+++ b/src/sentry/integrations/slack/notifications.py
@@ -86,6 +86,10 @@
if attachment_blocks:
for attachment in attachment_blocks:
blocks.append(attachment)
+ if len(blocks) >= 2 and blocks[1].get("block_id"):
+ # block id needs to be in the first block
+ blocks[0]["block_id"] = blocks[1]["block_id"]
+ del blocks[1]["block_id"]
additional_attachment = get_additional_attachment(
integration, notification.organization
)
| {"golden_diff": "diff --git a/src/sentry/integrations/slack/message_builder/notifications/issues.py b/src/sentry/integrations/slack/message_builder/notifications/issues.py\n--- a/src/sentry/integrations/slack/message_builder/notifications/issues.py\n+++ b/src/sentry/integrations/slack/message_builder/notifications/issues.py\n@@ -2,7 +2,7 @@\n \n from typing import Any, Mapping\n \n-from sentry.integrations.slack.message_builder import SlackBody\n+from sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock\n from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder\n from sentry.notifications.notifications.base import ProjectNotification\n from sentry.services.hybrid_cloud.actor import RpcActor\n@@ -20,7 +20,7 @@\n super().__init__(notification, context, recipient)\n self.notification: ProjectNotification = notification\n \n- def build(self) -> SlackBody:\n+ def build(self) -> SlackAttachment | SlackBlock:\n group = getattr(self.notification, \"group\", None)\n return SlackIssuesMessageBuilder(\n group=group,\ndiff --git a/src/sentry/integrations/slack/notifications.py b/src/sentry/integrations/slack/notifications.py\n--- a/src/sentry/integrations/slack/notifications.py\n+++ b/src/sentry/integrations/slack/notifications.py\n@@ -86,6 +86,10 @@\n if attachment_blocks:\n for attachment in attachment_blocks:\n blocks.append(attachment)\n+ if len(blocks) >= 2 and blocks[1].get(\"block_id\"):\n+ # block id needs to be in the first block\n+ blocks[0][\"block_id\"] = blocks[1][\"block_id\"]\n+ del blocks[1][\"block_id\"]\n additional_attachment = get_additional_attachment(\n integration, notification.organization\n )\n", "issue": "Convert issue notifications to use block kit\nConvert the `IssueNotificationMessageBuilder` to use block kit (code [here](https://github.com/getsentry/sentry/blob/master/src/sentry/integrations/slack/message_builder/notifications/issues.py)) - note that these are the notifications an individual user receives if an issue alert is set to notify a specific user, and it is different from the notification that goes to a channel in that it does not have any actions (buttons). 
\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Mapping\n\nfrom sentry.integrations.slack.message_builder import SlackBody\nfrom sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder\nfrom sentry.notifications.notifications.base import ProjectNotification\nfrom sentry.services.hybrid_cloud.actor import RpcActor\n\nfrom .base import SlackNotificationsMessageBuilder\n\n\nclass IssueNotificationMessageBuilder(SlackNotificationsMessageBuilder):\n def __init__(\n self,\n notification: ProjectNotification,\n context: Mapping[str, Any],\n recipient: RpcActor,\n ) -> None:\n super().__init__(notification, context, recipient)\n self.notification: ProjectNotification = notification\n\n def build(self) -> SlackBody:\n group = getattr(self.notification, \"group\", None)\n return SlackIssuesMessageBuilder(\n group=group,\n event=getattr(self.notification, \"event\", None),\n tags=self.context.get(\"tags\", None),\n rules=getattr(self.notification, \"rules\", None),\n issue_details=True,\n notification=self.notification,\n recipient=self.recipient,\n ).build()\n", "path": "src/sentry/integrations/slack/message_builder/notifications/issues.py"}, {"content": "from __future__ import annotations\n\nimport logging\nfrom copy import copy\nfrom typing import Any, Iterable, List, Mapping\n\nimport sentry_sdk\n\nfrom sentry import features\nfrom sentry.integrations.mixins import NotifyBasicMixin\nfrom sentry.integrations.notifications import get_context, get_integrations_by_channel_by_recipient\nfrom sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock\nfrom sentry.integrations.slack.message_builder.base.block import BlockSlackMessageBuilder\nfrom sentry.integrations.slack.message_builder.notifications import get_message_builder\nfrom sentry.models.integrations.integration import Integration\nfrom sentry.notifications.additional_attachment_manager import get_additional_attachment\nfrom sentry.notifications.notifications.base import BaseNotification\nfrom sentry.notifications.notify import register_notification_provider\nfrom sentry.services.hybrid_cloud.actor import RpcActor\nfrom sentry.shared_integrations.exceptions import ApiError\nfrom sentry.tasks.integrations.slack import post_message\nfrom sentry.types.integrations import ExternalProviders\nfrom sentry.utils import json, metrics\n\nlogger = logging.getLogger(\"sentry.notifications\")\nSLACK_TIMEOUT = 5\n\n\nclass SlackNotifyBasicMixin(NotifyBasicMixin):\n def send_message(self, channel_id: str, message: str) -> None:\n payload = {\"channel\": channel_id, \"text\": message}\n try:\n self.get_client().post(\"/chat.postMessage\", data=payload, json=True)\n except ApiError as e:\n message = str(e)\n if message not in [\"Expired url\", \"channel_not_found\"]:\n logger.exception(\n \"slack.slash-notify.response-error\",\n extra={\"error\": message},\n )\n\n\ndef _get_attachments(\n notification: BaseNotification,\n recipient: RpcActor,\n shared_context: Mapping[str, Any],\n extra_context_by_actor: Mapping[RpcActor, Mapping[str, Any]] | None,\n) -> List[SlackAttachment] | SlackBlock:\n extra_context = (\n extra_context_by_actor[recipient] if extra_context_by_actor and recipient else {}\n )\n context = get_context(notification, recipient, shared_context, extra_context)\n cls = get_message_builder(notification.message_builder)\n attachments = cls(notification, context, recipient).build()\n if isinstance(attachments, List) or features.has(\n 
\"organizations:slack-block-kit\", notification.organization\n ):\n return attachments\n return [attachments]\n\n\ndef _notify_recipient(\n notification: BaseNotification,\n recipient: RpcActor,\n attachments: List[SlackAttachment],\n channel: str,\n integration: Integration,\n shared_context: Mapping[str, Any],\n) -> None:\n with sentry_sdk.start_span(op=\"notification.send_slack\", description=\"notify_recipient\"):\n # Make a local copy to which we can append.\n local_attachments = copy(attachments)\n\n text = notification.get_notification_title(ExternalProviders.SLACK, shared_context)\n\n if features.has(\"organizations:slack-block-kit\", notification.organization):\n blocks = []\n if text:\n # NOTE(isabella): with legacy attachments, the notification title was\n # automatically rendered based on the `text` field in the payload; in the block\n # system, that payload field is used strictly as preview/fallback text, so we need\n # to add this block to render the title in the notification ie. \"Issue marked as\n # regression\", \"New comment by <user>\"\n blocks.append(BlockSlackMessageBuilder.get_markdown_block(text))\n attachment_blocks = local_attachments.get(\"blocks\")\n if attachment_blocks:\n for attachment in attachment_blocks:\n blocks.append(attachment)\n additional_attachment = get_additional_attachment(\n integration, notification.organization\n )\n if additional_attachment:\n for block in additional_attachment:\n blocks.append(block)\n if (\n not text\n ): # if there isn't a notification title, try using message description as fallback\n text = notification.get_message_description(recipient, ExternalProviders.SLACK)\n payload = {\n \"channel\": channel,\n \"unfurl_links\": False,\n \"unfurl_media\": False,\n \"text\": text if text else \"\",\n \"blocks\": json.dumps(blocks),\n }\n callback_id = local_attachments.get(\"callback_id\")\n if callback_id:\n # callback_id is now at the same level as blocks, rather than within attachments\n if isinstance(callback_id, str):\n payload[\"callback_id\"] = callback_id\n else:\n payload[\"callback_id\"] = json.dumps(local_attachments.get(\"callback_id\"))\n else:\n # Add optional billing related attachment.\n additional_attachment = get_additional_attachment(\n integration, notification.organization\n )\n if additional_attachment:\n local_attachments.append(additional_attachment)\n\n # unfurl_links and unfurl_media are needed to preserve the intended message format\n # and prevent the app from replying with help text to the unfurl\n payload = {\n \"channel\": channel,\n \"link_names\": 1,\n \"unfurl_links\": False,\n \"unfurl_media\": False,\n \"text\": text,\n \"attachments\": json.dumps(local_attachments),\n }\n\n log_params = {\n \"notification\": notification,\n \"recipient\": recipient.id,\n \"channel_id\": channel,\n }\n post_message.apply_async(\n kwargs={\n \"integration_id\": integration.id,\n \"payload\": payload,\n \"log_error_message\": \"notification.fail.slack_post\",\n \"log_params\": log_params,\n }\n )\n # recording data outside of span\n notification.record_notification_sent(recipient, ExternalProviders.SLACK)\n\n\n@register_notification_provider(ExternalProviders.SLACK)\ndef send_notification_as_slack(\n notification: BaseNotification,\n recipients: Iterable[RpcActor],\n shared_context: Mapping[str, Any],\n extra_context_by_actor: Mapping[RpcActor, Mapping[str, Any]] | None,\n) -> None:\n \"\"\"Send an \"activity\" or \"alert rule\" notification to a Slack user or team, but NOT to a channel directly.\n Sending Slack 
notifications to a channel is in integrations/slack/actions/notification.py\"\"\"\n with sentry_sdk.start_span(\n op=\"notification.send_slack\", description=\"gen_channel_integration_map\"\n ):\n data = get_integrations_by_channel_by_recipient(\n notification.organization, recipients, ExternalProviders.SLACK\n )\n\n for recipient, integrations_by_channel in data.items():\n with sentry_sdk.start_span(op=\"notification.send_slack\", description=\"send_one\"):\n with sentry_sdk.start_span(op=\"notification.send_slack\", description=\"gen_attachments\"):\n attachments = _get_attachments(\n notification,\n recipient,\n shared_context,\n extra_context_by_actor,\n )\n\n for channel, integration in integrations_by_channel.items():\n _notify_recipient(\n notification=notification,\n recipient=recipient,\n attachments=attachments,\n channel=channel,\n integration=integration,\n shared_context=shared_context,\n )\n\n metrics.incr(\n f\"{notification.metrics_key}.notifications.sent\",\n instance=f\"slack.{notification.metrics_key}.notification\",\n skip_internal=False,\n )\n", "path": "src/sentry/integrations/slack/notifications.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Mapping\n\nfrom sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock\nfrom sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder\nfrom sentry.notifications.notifications.base import ProjectNotification\nfrom sentry.services.hybrid_cloud.actor import RpcActor\n\nfrom .base import SlackNotificationsMessageBuilder\n\n\nclass IssueNotificationMessageBuilder(SlackNotificationsMessageBuilder):\n def __init__(\n self,\n notification: ProjectNotification,\n context: Mapping[str, Any],\n recipient: RpcActor,\n ) -> None:\n super().__init__(notification, context, recipient)\n self.notification: ProjectNotification = notification\n\n def build(self) -> SlackAttachment | SlackBlock:\n group = getattr(self.notification, \"group\", None)\n return SlackIssuesMessageBuilder(\n group=group,\n event=getattr(self.notification, \"event\", None),\n tags=self.context.get(\"tags\", None),\n rules=getattr(self.notification, \"rules\", None),\n issue_details=True,\n notification=self.notification,\n recipient=self.recipient,\n ).build()\n", "path": "src/sentry/integrations/slack/message_builder/notifications/issues.py"}, {"content": "from __future__ import annotations\n\nimport logging\nfrom copy import copy\nfrom typing import Any, Iterable, List, Mapping\n\nimport sentry_sdk\n\nfrom sentry import features\nfrom sentry.integrations.mixins import NotifyBasicMixin\nfrom sentry.integrations.notifications import get_context, get_integrations_by_channel_by_recipient\nfrom sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock\nfrom sentry.integrations.slack.message_builder.base.block import BlockSlackMessageBuilder\nfrom sentry.integrations.slack.message_builder.notifications import get_message_builder\nfrom sentry.models.integrations.integration import Integration\nfrom sentry.notifications.additional_attachment_manager import get_additional_attachment\nfrom sentry.notifications.notifications.base import BaseNotification\nfrom sentry.notifications.notify import register_notification_provider\nfrom sentry.services.hybrid_cloud.actor import RpcActor\nfrom sentry.shared_integrations.exceptions import ApiError\nfrom sentry.tasks.integrations.slack import post_message\nfrom sentry.types.integrations import ExternalProviders\nfrom 
sentry.utils import json, metrics\n\nlogger = logging.getLogger(\"sentry.notifications\")\nSLACK_TIMEOUT = 5\n\n\nclass SlackNotifyBasicMixin(NotifyBasicMixin):\n def send_message(self, channel_id: str, message: str) -> None:\n payload = {\"channel\": channel_id, \"text\": message}\n try:\n self.get_client().post(\"/chat.postMessage\", data=payload, json=True)\n except ApiError as e:\n message = str(e)\n if message not in [\"Expired url\", \"channel_not_found\"]:\n logger.exception(\n \"slack.slash-notify.response-error\",\n extra={\"error\": message},\n )\n\n\ndef _get_attachments(\n notification: BaseNotification,\n recipient: RpcActor,\n shared_context: Mapping[str, Any],\n extra_context_by_actor: Mapping[RpcActor, Mapping[str, Any]] | None,\n) -> List[SlackAttachment] | SlackBlock:\n extra_context = (\n extra_context_by_actor[recipient] if extra_context_by_actor and recipient else {}\n )\n context = get_context(notification, recipient, shared_context, extra_context)\n cls = get_message_builder(notification.message_builder)\n attachments = cls(notification, context, recipient).build()\n if isinstance(attachments, List) or features.has(\n \"organizations:slack-block-kit\", notification.organization\n ):\n return attachments\n return [attachments]\n\n\ndef _notify_recipient(\n notification: BaseNotification,\n recipient: RpcActor,\n attachments: List[SlackAttachment],\n channel: str,\n integration: Integration,\n shared_context: Mapping[str, Any],\n) -> None:\n with sentry_sdk.start_span(op=\"notification.send_slack\", description=\"notify_recipient\"):\n # Make a local copy to which we can append.\n local_attachments = copy(attachments)\n\n text = notification.get_notification_title(ExternalProviders.SLACK, shared_context)\n\n if features.has(\"organizations:slack-block-kit\", notification.organization):\n blocks = []\n if text:\n # NOTE(isabella): with legacy attachments, the notification title was\n # automatically rendered based on the `text` field in the payload; in the block\n # system, that payload field is used strictly as preview/fallback text, so we need\n # to add this block to render the title in the notification ie. 
\"Issue marked as\n # regression\", \"New comment by <user>\"\n blocks.append(BlockSlackMessageBuilder.get_markdown_block(text))\n attachment_blocks = local_attachments.get(\"blocks\")\n if attachment_blocks:\n for attachment in attachment_blocks:\n blocks.append(attachment)\n if len(blocks) >= 2 and blocks[1].get(\"block_id\"):\n # block id needs to be in the first block\n blocks[0][\"block_id\"] = blocks[1][\"block_id\"]\n del blocks[1][\"block_id\"]\n additional_attachment = get_additional_attachment(\n integration, notification.organization\n )\n if additional_attachment:\n for block in additional_attachment:\n blocks.append(block)\n if (\n not text\n ): # if there isn't a notification title, try using message description as fallback\n text = notification.get_message_description(recipient, ExternalProviders.SLACK)\n payload = {\n \"channel\": channel,\n \"unfurl_links\": False,\n \"unfurl_media\": False,\n \"text\": text if text else \"\",\n \"blocks\": json.dumps(blocks),\n }\n callback_id = local_attachments.get(\"callback_id\")\n if callback_id:\n # callback_id is now at the same level as blocks, rather than within attachments\n if isinstance(callback_id, str):\n payload[\"callback_id\"] = callback_id\n else:\n payload[\"callback_id\"] = json.dumps(local_attachments.get(\"callback_id\"))\n else:\n # Add optional billing related attachment.\n additional_attachment = get_additional_attachment(\n integration, notification.organization\n )\n if additional_attachment:\n local_attachments.append(additional_attachment)\n\n # unfurl_links and unfurl_media are needed to preserve the intended message format\n # and prevent the app from replying with help text to the unfurl\n payload = {\n \"channel\": channel,\n \"link_names\": 1,\n \"unfurl_links\": False,\n \"unfurl_media\": False,\n \"text\": text,\n \"attachments\": json.dumps(local_attachments),\n }\n\n log_params = {\n \"notification\": notification,\n \"recipient\": recipient.id,\n \"channel_id\": channel,\n }\n post_message.apply_async(\n kwargs={\n \"integration_id\": integration.id,\n \"payload\": payload,\n \"log_error_message\": \"notification.fail.slack_post\",\n \"log_params\": log_params,\n }\n )\n # recording data outside of span\n notification.record_notification_sent(recipient, ExternalProviders.SLACK)\n\n\n@register_notification_provider(ExternalProviders.SLACK)\ndef send_notification_as_slack(\n notification: BaseNotification,\n recipients: Iterable[RpcActor],\n shared_context: Mapping[str, Any],\n extra_context_by_actor: Mapping[RpcActor, Mapping[str, Any]] | None,\n) -> None:\n \"\"\"Send an \"activity\" or \"alert rule\" notification to a Slack user or team, but NOT to a channel directly.\n Sending Slack notifications to a channel is in integrations/slack/actions/notification.py\"\"\"\n with sentry_sdk.start_span(\n op=\"notification.send_slack\", description=\"gen_channel_integration_map\"\n ):\n data = get_integrations_by_channel_by_recipient(\n notification.organization, recipients, ExternalProviders.SLACK\n )\n\n for recipient, integrations_by_channel in data.items():\n with sentry_sdk.start_span(op=\"notification.send_slack\", description=\"send_one\"):\n with sentry_sdk.start_span(op=\"notification.send_slack\", description=\"gen_attachments\"):\n attachments = _get_attachments(\n notification,\n recipient,\n shared_context,\n extra_context_by_actor,\n )\n\n for channel, integration in integrations_by_channel.items():\n _notify_recipient(\n notification=notification,\n recipient=recipient,\n 
attachments=attachments,\n channel=channel,\n integration=integration,\n shared_context=shared_context,\n )\n\n metrics.incr(\n f\"{notification.metrics_key}.notifications.sent\",\n instance=f\"slack.{notification.metrics_key}.notification\",\n skip_internal=False,\n )\n", "path": "src/sentry/integrations/slack/notifications.py"}]} | 2,661 | 399 |
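A minimal, self-contained sketch of the `block_id` relocation introduced by the golden diff in the row above; the block dicts are hypothetical Slack Block Kit fragments, not code from the Sentry repository.

```python
# Sketch only: mirrors the patch's relocation of block_id into the first block
# before the payload is serialized ("block id needs to be in the first block").
from typing import Any, Dict, List


def relocate_block_id(blocks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    # If a title block was prepended without a block_id, move the id up from
    # the second block and remove it there, as the patch does.
    if len(blocks) >= 2 and blocks[1].get("block_id"):
        blocks[0]["block_id"] = blocks[1]["block_id"]
        del blocks[1]["block_id"]
    return blocks


blocks = [
    {"type": "section", "text": {"type": "mrkdwn", "text": "Issue marked as regression"}},
    {"type": "section", "block_id": '{"issue": 123}', "text": {"type": "mrkdwn", "text": "details"}},
]
print(relocate_block_id(blocks)[0]["block_id"])  # {"issue": 123}
```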
gh_patches_debug_7581 | rasdani/github-patches | git_diff | ivy-llc__ivy-18346 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sort
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/search.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_supported_dtypes(
10 {"2.5.0 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
11 "paddle",
12 )
13 @to_ivy_arrays_and_back
14 def argmax(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
15 return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)
16
17
18 @with_supported_dtypes(
19 {"2.5.0 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
20 "paddle",
21 )
22 @to_ivy_arrays_and_back
23 def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
24 return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)
25
26
27 @with_supported_dtypes(
28 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
29 "paddle",
30 )
31 @to_ivy_arrays_and_back
32 def argsort(x, /, *, axis=-1, descending=False, name=None):
33 return ivy.argsort(x, axis=axis, descending=descending)
34
35
36 @with_supported_dtypes(
37 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
38 "paddle",
39 )
40 @to_ivy_arrays_and_back
41 def nonzero(input, /, *, as_tuple=False):
42 ret = ivy.nonzero(input)
43 if as_tuple is False:
44 ret = ivy.matrix_transpose(ivy.stack(ret))
45 return ret
46
47
48 @with_supported_dtypes(
49 {"2.5.0 and below": ("float32", "float64", "int32", "int64")},
50 "paddle",
51 )
52 @to_ivy_arrays_and_back
53 def searchsorted(sorted_sequence, values, out_int32=False, right=False, name=None):
54 if right:
55 side = "right"
56 else:
57 side = "left"
58 ret = ivy.searchsorted(sorted_sequence, values, side=side)
59 if out_int32:
60 ret = ivy.astype(ret, "int32")
61 return ret
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py
--- a/ivy/functional/frontends/paddle/tensor/search.py
+++ b/ivy/functional/frontends/paddle/tensor/search.py
@@ -33,6 +33,15 @@
return ivy.argsort(x, axis=axis, descending=descending)
+@with_supported_dtypes(
+ {"2.5.0 and below": ("float32", "float64", "int32", "int64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def sort(x, /, *, axis=-1, descending=False, name=None):
+ return ivy.sort(x, axis=axis, descending=descending)
+
+
@with_supported_dtypes(
{"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
"paddle",
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py\n--- a/ivy/functional/frontends/paddle/tensor/search.py\n+++ b/ivy/functional/frontends/paddle/tensor/search.py\n@@ -33,6 +33,15 @@\n return ivy.argsort(x, axis=axis, descending=descending)\n \n \n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def sort(x, /, *, axis=-1, descending=False, name=None):\n+ return ivy.sort(x, axis=axis, descending=descending)\n+\n+\n @with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n", "issue": "Sort\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmax(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argsort(x, /, *, axis=-1, descending=False, name=None):\n return ivy.argsort(x, axis=axis, descending=descending)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef nonzero(input, /, *, as_tuple=False):\n ret = ivy.nonzero(input)\n if as_tuple is False:\n ret = ivy.matrix_transpose(ivy.stack(ret))\n return ret\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef searchsorted(sorted_sequence, values, out_int32=False, right=False, name=None):\n if right:\n side = \"right\"\n else:\n side = \"left\"\n ret = ivy.searchsorted(sorted_sequence, values, side=side)\n if out_int32:\n ret = ivy.astype(ret, \"int32\")\n return ret\n", "path": "ivy/functional/frontends/paddle/tensor/search.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmax(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", 
\"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argsort(x, /, *, axis=-1, descending=False, name=None):\n return ivy.argsort(x, axis=axis, descending=descending)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef sort(x, /, *, axis=-1, descending=False, name=None):\n return ivy.sort(x, axis=axis, descending=descending)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef nonzero(input, /, *, as_tuple=False):\n ret = ivy.nonzero(input)\n if as_tuple is False:\n ret = ivy.matrix_transpose(ivy.stack(ret))\n return ret\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef searchsorted(sorted_sequence, values, out_int32=False, right=False, name=None):\n if right:\n side = \"right\"\n else:\n side = \"left\"\n ret = ivy.searchsorted(sorted_sequence, values, side=side)\n if out_int32:\n ret = ivy.astype(ret, \"int32\")\n return ret\n", "path": "ivy/functional/frontends/paddle/tensor/search.py"}]} | 964 | 230 |
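A minimal sketch of the thin frontend-wrapper pattern used by the `sort` function added in the golden diff above: a Paddle-style keyword-only signature delegating to a backend sort. NumPy stands in for the Ivy backend here; this is illustrative, not Ivy or Paddle code.

```python
import numpy as np


def sort(x, /, *, axis=-1, descending=False, name=None):
    # Sort along the requested axis, then flip for descending order.
    out = np.sort(np.asarray(x), axis=axis)
    if descending:
        out = np.flip(out, axis=axis)
    return out


print(sort([[3, 1, 2], [9, 7, 8]], axis=1, descending=True))
# [[3 2 1]
#  [9 8 7]]
```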
gh_patches_debug_29121 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-2235 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Perplexity dtype restriction too strict
## 🐛 Bug
The perplexity metric requires the input dtype to be either fp32 or fp64, but this doesn't work with e.g. fp16, and users need to manually recast.
https://github.com/Lightning-AI/torchmetrics/blob/894de4caeeae820f60f1871d75334873241e5633/src/torchmetrics/functional/text/perplexity.py#L21
### Expected behavior
The metric should accept other floating point dtypes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/functional/text/perplexity.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional, Tuple
16
17 import torch
18 from torch import Tensor
19 from torch.nn import functional as F # noqa: N812
20
21 _TORCH_FLOAT_OR_DOUBLE = (torch.float32, torch.float64)
22
23
24 def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:
25 """Check shape and type consistency of input vectors.
26
27 Args:
28 preds:
29 Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
30 vocab_size]. Scores will be normalized internally using softmax.
31 target:
32 Ground truth values with a shape [batch_size, seq_len].
33
34 Raises:
35 ValueError:
36 If ``preds`` tensor has no 3 dimensions.
37 ValueError:
38 If ``target`` tensor has no 2 dimensions.
39 ValueError:
40 If the first two dimensions of ``preds`` and ``target`` do not equal.
41 TypeError:
42 If ``preds`` dtype is not one of ``(torch.float16, torch.float32, torch.float64)``
43 TypeError:
44 If ``target`` is not of a type LongTensor (torch.int64)
45
46 """
47 if len(preds.shape) != 3:
48 raise ValueError(
49 "Input tensor `preds` is expected to have 3 dimensions, [batch_size, seq_len, vocab_size],"
50 f" but got {len(preds.shape)}."
51 )
52 if len(target.shape) != 2:
53 raise ValueError(
54 "Input tensor `target` is expected to have 2 dimensions, [batch_size, seq_len],"
55 f" but got {len(target.shape)}."
56 )
57 if preds.shape[:2] != target.shape:
58 raise ValueError(
59 "Input tensors `preds` and `target` are expected to have equaling first two dimensions,"
60 f" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}."
61 )
62 if preds.dtype not in _TORCH_FLOAT_OR_DOUBLE:
63 raise TypeError(
64 f"Input tensor `preds` is expected to be of a type one of {_TORCH_FLOAT_OR_DOUBLE} but got {preds.dtype}."
65 )
66 if target.dtype != torch.int64:
67 raise TypeError(f"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.")
68
69
70 def _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tuple[Tensor, Tensor]:
71 """Compute intermediate statistics for Perplexity.
72
73 Args:
74 preds:
75 Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
76 vocab_size]. Scores will be normalized internally using softmax.
77 target:
78 Ground truth values with a shape [batch_size, seq_len].
79 ignore_index:
80 Integer specifying a target class to ignore. If given, this class index does not contribute
81 to the returned score.
82
83 Returns:
84 Log probabilities, summed over all samples
85 Number of samples
86
87 """
88 _check_shape_and_type_consistency(preds, target)
89
90 probs = F.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)
91 target = target.reshape(-1)
92
93 if ignore_index is not None:
94 mask = target.ne(ignore_index)
95 target = target.where(target != ignore_index, torch.tensor(0, device=target.device))
96 else:
97 mask = torch.ones_like(target, dtype=torch.bool)
98
99 probs = probs[:, target].diagonal()[mask]
100 total_log_probs = -probs.log().sum()
101 count = mask.sum()
102
103 return total_log_probs, count
104
105
106 def _perplexity_compute(total: Tensor, count: Tensor) -> Tensor:
107 """Compute the Perplexity.
108
109 Args:
110 total: Log probabilities, summed over all samples
111 count: Number of samples
112 Returns:
113 Perplexity
114
115 """
116 return torch.exp(total / count)
117
118
119 def perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:
120 """Perplexity measures how well a language model predicts a text sample.
121
122 This metric is calculated as the average number of bits per word a model needs to represent the sample.
123
124 Args:
125 preds:
126 Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
127 vocab_size], which is the output of a language model. Scores will be normalized internally using softmax.
128 target:
129 Ground truth values with a shape [batch_size, seq_len].
130 ignore_index:
131 Integer specifying a target class to ignore. If given, this class index does not contribute
132 to the returned score.
133
134 Returns:
135 Perplexity value
136
137 Examples:
138 >>> import torch
139 >>> gen = torch.manual_seed(42)
140 >>> preds = torch.rand(2, 8, 5, generator=gen)
141 >>> target = torch.randint(5, (2, 8), generator=gen)
142 >>> target[0, 6:] = -100
143 >>> perplexity(preds, target, ignore_index=-100)
144 tensor(5.8540)
145
146 """
147 total, count = _perplexity_update(preds, target, ignore_index)
148 return _perplexity_compute(total, count)
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/functional/text/perplexity.py b/src/torchmetrics/functional/text/perplexity.py
--- a/src/torchmetrics/functional/text/perplexity.py
+++ b/src/torchmetrics/functional/text/perplexity.py
@@ -16,9 +16,6 @@
import torch
from torch import Tensor
-from torch.nn import functional as F # noqa: N812
-
-_TORCH_FLOAT_OR_DOUBLE = (torch.float32, torch.float64)
def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:
@@ -59,10 +56,8 @@
"Input tensors `preds` and `target` are expected to have equaling first two dimensions,"
f" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}."
)
- if preds.dtype not in _TORCH_FLOAT_OR_DOUBLE:
- raise TypeError(
- f"Input tensor `preds` is expected to be of a type one of {_TORCH_FLOAT_OR_DOUBLE} but got {preds.dtype}."
- )
+ if not preds.is_floating_point():
+ raise TypeError(f"Input tensor `preds` is expected to be of floating point type but got {preds.dtype}.")
if target.dtype != torch.int64:
raise TypeError(f"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.")
@@ -87,7 +82,7 @@
"""
_check_shape_and_type_consistency(preds, target)
- probs = F.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)
+ probs = torch.nn.functional.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)
target = target.reshape(-1)
if ignore_index is not None:
| {"golden_diff": "diff --git a/src/torchmetrics/functional/text/perplexity.py b/src/torchmetrics/functional/text/perplexity.py\n--- a/src/torchmetrics/functional/text/perplexity.py\n+++ b/src/torchmetrics/functional/text/perplexity.py\n@@ -16,9 +16,6 @@\n \n import torch\n from torch import Tensor\n-from torch.nn import functional as F # noqa: N812\n-\n-_TORCH_FLOAT_OR_DOUBLE = (torch.float32, torch.float64)\n \n \n def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:\n@@ -59,10 +56,8 @@\n \"Input tensors `preds` and `target` are expected to have equaling first two dimensions,\"\n f\" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}.\"\n )\n- if preds.dtype not in _TORCH_FLOAT_OR_DOUBLE:\n- raise TypeError(\n- f\"Input tensor `preds` is expected to be of a type one of {_TORCH_FLOAT_OR_DOUBLE} but got {preds.dtype}.\"\n- )\n+ if not preds.is_floating_point():\n+ raise TypeError(f\"Input tensor `preds` is expected to be of floating point type but got {preds.dtype}.\")\n if target.dtype != torch.int64:\n raise TypeError(f\"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.\")\n \n@@ -87,7 +82,7 @@\n \"\"\"\n _check_shape_and_type_consistency(preds, target)\n \n- probs = F.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)\n+ probs = torch.nn.functional.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)\n target = target.reshape(-1)\n \n if ignore_index is not None:\n", "issue": "Perplexity dtype restriction too strict\n## \ud83d\udc1b Bug\r\n\r\nThe perplexity metric requires the input dtype to be either fp32 or fp64, but this doesn't work with e.g. fp16, and users need to manually recast.\r\n\r\nhttps://github.com/Lightning-AI/torchmetrics/blob/894de4caeeae820f60f1871d75334873241e5633/src/torchmetrics/functional/text/perplexity.py#L21\r\n\r\n### Expected behavior\r\n\r\nThe metric should accept other floating point dtypes.\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import functional as F # noqa: N812\n\n_TORCH_FLOAT_OR_DOUBLE = (torch.float32, torch.float64)\n\n\ndef _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:\n \"\"\"Check shape and type consistency of input vectors.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size]. 
Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n\n Raises:\n ValueError:\n If ``preds`` tensor has no 3 dimensions.\n ValueError:\n If ``target`` tensor has no 2 dimensions.\n ValueError:\n If the first two dimensions of ``preds`` and ``target`` do not equal.\n TypeError:\n If ``preds`` dtype is not one of ``(torch.float16, torch.float32, torch.float64)``\n TypeError:\n If ``target`` is not of a type LongTensor (torch.int64)\n\n \"\"\"\n if len(preds.shape) != 3:\n raise ValueError(\n \"Input tensor `preds` is expected to have 3 dimensions, [batch_size, seq_len, vocab_size],\"\n f\" but got {len(preds.shape)}.\"\n )\n if len(target.shape) != 2:\n raise ValueError(\n \"Input tensor `target` is expected to have 2 dimensions, [batch_size, seq_len],\"\n f\" but got {len(target.shape)}.\"\n )\n if preds.shape[:2] != target.shape:\n raise ValueError(\n \"Input tensors `preds` and `target` are expected to have equaling first two dimensions,\"\n f\" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}.\"\n )\n if preds.dtype not in _TORCH_FLOAT_OR_DOUBLE:\n raise TypeError(\n f\"Input tensor `preds` is expected to be of a type one of {_TORCH_FLOAT_OR_DOUBLE} but got {preds.dtype}.\"\n )\n if target.dtype != torch.int64:\n raise TypeError(f\"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.\")\n\n\ndef _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tuple[Tensor, Tensor]:\n \"\"\"Compute intermediate statistics for Perplexity.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Log probabilities, summed over all samples\n Number of samples\n\n \"\"\"\n _check_shape_and_type_consistency(preds, target)\n\n probs = F.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)\n target = target.reshape(-1)\n\n if ignore_index is not None:\n mask = target.ne(ignore_index)\n target = target.where(target != ignore_index, torch.tensor(0, device=target.device))\n else:\n mask = torch.ones_like(target, dtype=torch.bool)\n\n probs = probs[:, target].diagonal()[mask]\n total_log_probs = -probs.log().sum()\n count = mask.sum()\n\n return total_log_probs, count\n\n\ndef _perplexity_compute(total: Tensor, count: Tensor) -> Tensor:\n \"\"\"Compute the Perplexity.\n\n Args:\n total: Log probabilities, summed over all samples\n count: Number of samples\n Returns:\n Perplexity\n\n \"\"\"\n return torch.exp(total / count)\n\n\ndef perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:\n \"\"\"Perplexity measures how well a language model predicts a text sample.\n\n This metric is calculated as the average number of bits per word a model needs to represent the sample.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size], which is the output of a language model. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. 
If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Perplexity value\n\n Examples:\n >>> import torch\n >>> gen = torch.manual_seed(42)\n >>> preds = torch.rand(2, 8, 5, generator=gen)\n >>> target = torch.randint(5, (2, 8), generator=gen)\n >>> target[0, 6:] = -100\n >>> perplexity(preds, target, ignore_index=-100)\n tensor(5.8540)\n\n \"\"\"\n total, count = _perplexity_update(preds, target, ignore_index)\n return _perplexity_compute(total, count)\n", "path": "src/torchmetrics/functional/text/perplexity.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\n\ndef _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:\n \"\"\"Check shape and type consistency of input vectors.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n\n Raises:\n ValueError:\n If ``preds`` tensor has no 3 dimensions.\n ValueError:\n If ``target`` tensor has no 2 dimensions.\n ValueError:\n If the first two dimensions of ``preds`` and ``target`` do not equal.\n TypeError:\n If ``preds`` dtype is not one of ``(torch.float16, torch.float32, torch.float64)``\n TypeError:\n If ``target`` is not of a type LongTensor (torch.int64)\n\n \"\"\"\n if len(preds.shape) != 3:\n raise ValueError(\n \"Input tensor `preds` is expected to have 3 dimensions, [batch_size, seq_len, vocab_size],\"\n f\" but got {len(preds.shape)}.\"\n )\n if len(target.shape) != 2:\n raise ValueError(\n \"Input tensor `target` is expected to have 2 dimensions, [batch_size, seq_len],\"\n f\" but got {len(target.shape)}.\"\n )\n if preds.shape[:2] != target.shape:\n raise ValueError(\n \"Input tensors `preds` and `target` are expected to have equaling first two dimensions,\"\n f\" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}.\"\n )\n if not preds.is_floating_point():\n raise TypeError(f\"Input tensor `preds` is expected to be of floating point type but got {preds.dtype}.\")\n if target.dtype != torch.int64:\n raise TypeError(f\"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.\")\n\n\ndef _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tuple[Tensor, Tensor]:\n \"\"\"Compute intermediate statistics for Perplexity.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. 
If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Log probabilities, summed over all samples\n Number of samples\n\n \"\"\"\n _check_shape_and_type_consistency(preds, target)\n\n probs = torch.nn.functional.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)\n target = target.reshape(-1)\n\n if ignore_index is not None:\n mask = target.ne(ignore_index)\n target = target.where(target != ignore_index, torch.tensor(0, device=target.device))\n else:\n mask = torch.ones_like(target, dtype=torch.bool)\n\n probs = probs[:, target].diagonal()[mask]\n total_log_probs = -probs.log().sum()\n count = mask.sum()\n\n return total_log_probs, count\n\n\ndef _perplexity_compute(total: Tensor, count: Tensor) -> Tensor:\n \"\"\"Compute the Perplexity.\n\n Args:\n total: Log probabilities, summed over all samples\n count: Number of samples\n Returns:\n Perplexity\n\n \"\"\"\n return torch.exp(total / count)\n\n\ndef perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:\n \"\"\"Perplexity measures how well a language model predicts a text sample.\n\n This metric is calculated as the average number of bits per word a model needs to represent the sample.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size], which is the output of a language model. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Perplexity value\n\n Examples:\n >>> import torch\n >>> gen = torch.manual_seed(42)\n >>> preds = torch.rand(2, 8, 5, generator=gen)\n >>> target = torch.randint(5, (2, 8), generator=gen)\n >>> target[0, 6:] = -100\n >>> perplexity(preds, target, ignore_index=-100)\n tensor(5.8540)\n\n \"\"\"\n total, count = _perplexity_update(preds, target, ignore_index)\n return _perplexity_compute(total, count)\n", "path": "src/torchmetrics/functional/text/perplexity.py"}]} | 2,040 | 419 |
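A minimal sketch of the dtype check relaxed by the patch in the row above: the old guard only admitted float32/float64, while `Tensor.is_floating_point()` also accepts half-precision logits. Illustrative only, not torchmetrics code.

```python
import torch

# Half-precision logits of shape [batch_size, seq_len, vocab_size].
preds_fp16 = torch.rand(2, 8, 5).half()

old_ok = preds_fp16.dtype in (torch.float32, torch.float64)  # False -> TypeError before the fix
new_ok = preds_fp16.is_floating_point()                      # True  -> accepted after the fix
print(old_ok, new_ok)  # False True
```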
gh_patches_debug_25918 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3789 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/booster/booster.py`
Content:
```
1 import warnings
2 from contextlib import contextmanager
3 from typing import Callable, Iterator, List, Optional, Tuple, Union
4
5 import torch
6 import torch.nn as nn
7 from torch.optim import Optimizer
8 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
9 from torch.utils.data import DataLoader
10
11 from colossalai.checkpoint_io import GeneralCheckpointIO
12
13 from .accelerator import Accelerator
14 from .mixed_precision import MixedPrecision, mixed_precision_factory
15 from .plugin import Plugin
16
17 __all__ = ['Booster']
18
19
20 class Booster:
21 """
22 Booster is a high-level API for training neural networks. It provides a unified interface for
23 training with different precision, accelerator, and plugin.
24
25 Examples:
26 >>> colossalai.launch(...)
27 >>> plugin = GeminiPlugin(stage=3, ...)
28 >>> booster = Booster(precision='fp16', plugin=plugin)
29 >>>
30 >>> model = GPT2()
31 >>> optimizer = Adam(model.parameters())
32 >>> dataloader = Dataloader(Dataset)
33 >>> lr_scheduler = LinearWarmupScheduler()
34 >>> criterion = GPTLMLoss()
35 >>>
36 >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)
37 >>>
38 >>> for epoch in range(max_epochs):
39 >>> for input_ids, attention_mask in dataloader:
40 >>> outputs = model(input_ids, attention_mask)
41 >>> loss = criterion(outputs.logits, input_ids)
42 >>> booster.backward(loss, optimizer)
43 >>> optimizer.step()
44 >>> lr_scheduler.step()
45 >>> optimizer.zero_grad()
46
47
48 Args:
49 device (str or torch.device): The device to run the training. Default: 'cuda'.
50 mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.
51 If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.
52 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.
53 plugin (Plugin): The plugin to run the training. Default: None.
54 """
55
56 def __init__(self,
57 device: str = 'cuda',
58 mixed_precision: Union[MixedPrecision, str] = None,
59 plugin: Optional[Plugin] = None) -> None:
60 if plugin is not None:
61 assert isinstance(
62 plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'
63 self.plugin = plugin
64
65 # set accelerator
66 if self.plugin and self.plugin.control_device():
67 self.accelerator = None
68 warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')
69 else:
70 self.accelerator = Accelerator(device)
71
72 # set precision
73 if self.plugin and self.plugin.control_precision():
74 warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')
75 self.mixed_precision = None
76 elif mixed_precision is None:
77 self.mixed_precision = None
78 else:
79 # validate and set precision
80 if isinstance(mixed_precision, str):
81 # the user will take the default arguments for amp training
82 self.mixed_precision = mixed_precision_factory(mixed_precision)
83 elif isinstance(mixed_precision, MixedPrecision):
84 # the user can customize the arguments by passing the precision object
85 self.mixed_precision = mixed_precision
86 else:
87 raise ValueError(
88 f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'
89 )
90
91 if self.plugin is not None and self.plugin.control_checkpoint_io():
92 self.checkpoint_io = self.plugin.get_checkpoint_io()
93 else:
94 self.checkpoint_io = GeneralCheckpointIO()
95
96 def boost(
97 self,
98 model: nn.Module,
99 optimizer: Optimizer,
100 criterion: Callable = None,
101 dataloader: DataLoader = None,
102 lr_scheduler: LRScheduler = None,
103 ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:
104 """
105 Boost the model, optimizer, criterion, lr_scheduler, and dataloader.
106
107 Args:
108 model (nn.Module): The model to be boosted.
109 optimizer (Optimizer): The optimizer to be boosted.
110 criterion (Callable): The criterion to be boosted.
111 dataloader (DataLoader): The dataloader to be boosted.
112 lr_scheduler (LRScheduler): The lr_scheduler to be boosted.
113 """
114 # TODO(FrankLeeeee): consider multi-model and multi-optimizer case
115 # TODO(FrankLeeeee): consider multi-dataloader case
116 # transform model for mixed precision
117 if self.plugin:
118 model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(
119 model, optimizer, criterion, dataloader, lr_scheduler)
120
121 if self.plugin and not self.plugin.control_device():
122 # transform model for accelerator
123 model = self.accelerator.configure(model)
124
125 if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):
126 # transform model for mixed precision
127 # when mixed_precision is specified and the plugin is not given or does not control the precision
128 model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)
129
130 return model, optimizer, criterion, dataloader, lr_scheduler
131
132 def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:
133 # TODO: implement this method with plugin
134 optimizer.backward(loss)
135
136 def execute_pipeline(self,
137 data_iter: Iterator,
138 model: nn.Module,
139 criterion: Callable[[torch.Tensor], torch.Tensor],
140 optimizer: Optimizer,
141 return_loss: bool = True,
142 return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]:
143 # TODO: implement this method
144 # run pipeline forward backward pass
145 # return loss or outputs if needed
146 pass
147
148 def no_sync(self, model: nn.Module) -> contextmanager:
149 assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'
150 assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'
151 return self.plugin.no_sync(model)
152
153 def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):
154 """Load model from checkpoint.
155
156 Args:
157 model (nn.Module): A model boosted by Booster.
158 checkpoint (str): Path to the checkpoint. It must be a local path.
159 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
160 strict (bool, optional): whether to strictly enforce that the keys
161 in :attr:`state_dict` match the keys returned by this module's
162 :meth:`~torch.nn.Module.state_dict` function. Defaults to True.
163 """
164 self.checkpoint_io.load_model(model, checkpoint, strict)
165
166 def save_model(self,
167 model: nn.Module,
168 checkpoint: str,
169 prefix: str = None,
170 shard: bool = False,
171 size_per_shard: int = 1024):
172 """Save model to checkpoint.
173
174 Args:
175 model (nn.Module): A model boosted by Booster.
176 checkpoint (str): Path to the checkpoint. It must be a local path.
177 It is a file path if ``shard=False``. Otherwise, it is a directory path.
178 prefix (str, optional): A prefix added to parameter and buffer
179 names to compose the keys in state_dict. Defaults to None.
180 shard (bool, optional): Whether to save checkpoint a sharded way.
181 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
182 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
183 """
184 self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)
185
186 def load_optimizer(self, optimizer: Optimizer, checkpoint: str):
187 """Load optimizer from checkpoint.
188
189 Args:
190 optimizer (Optimizer): An optimizer boosted by Booster.
191 checkpoint (str): Path to the checkpoint. It must be a local path.
192 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
193 """
194 self.checkpoint_io.load_optimizer(optimizer, checkpoint)
195
196 def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):
197 """Save optimizer to checkpoint.
198 Warning: Saving sharded optimizer checkpoint is not supported yet.
199
200 Args:
201 optimizer (Optimizer): An optimizer boosted by Booster.
202 checkpoint (str): Path to the checkpoint. It must be a local path.
203 It is a file path if ``shard=False``. Otherwise, it is a directory path.
204 shard (bool, optional): Whether to save checkpoint a sharded way.
205 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
206 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
207 """
208 self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)
209
210 def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
211 """Save lr scheduler to checkpoint.
212
213 Args:
214 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
215 checkpoint (str): Path to the checkpoint. It must be a local file path.
216 """
217 self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)
218
219 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
220 """Load lr scheduler from checkpoint.
221
222 Args:
223 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
224 checkpoint (str): Path to the checkpoint. It must be a local file path.
225 """
226 self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py
--- a/colossalai/booster/booster.py
+++ b/colossalai/booster/booster.py
@@ -130,6 +130,12 @@
return model, optimizer, criterion, dataloader, lr_scheduler
def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:
+ """Backward pass.
+
+ Args:
+ loss (torch.Tensor): The loss to be backpropagated.
+ optimizer (Optimizer): The optimizer to be updated.
+ """
# TODO: implement this method with plugin
optimizer.backward(loss)
@@ -146,6 +152,14 @@
pass
def no_sync(self, model: nn.Module) -> contextmanager:
+ """Context manager to disable gradient synchronization across DP process groups.
+
+ Args:
+ model (nn.Module): The model to be disabled gradient synchronization.
+
+ Returns:
+ contextmanager: Context to disable gradient synchronization.
+ """
assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'
assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'
return self.plugin.no_sync(model)
| {"golden_diff": "diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py\n--- a/colossalai/booster/booster.py\n+++ b/colossalai/booster/booster.py\n@@ -130,6 +130,12 @@\n return model, optimizer, criterion, dataloader, lr_scheduler\n \n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n+ \"\"\"Backward pass.\n+\n+ Args:\n+ loss (torch.Tensor): The loss to be backpropagated.\n+ optimizer (Optimizer): The optimizer to be updated.\n+ \"\"\"\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n \n@@ -146,6 +152,14 @@\n pass\n \n def no_sync(self, model: nn.Module) -> contextmanager:\n+ \"\"\"Context manager to disable gradient synchronization across DP process groups.\n+\n+ Args:\n+ model (nn.Module): The model to be disabled gradient synchronization.\n+\n+ Returns:\n+ contextmanager: Context to disable gradient synchronization.\n+ \"\"\"\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\n\nfrom colossalai.checkpoint_io import GeneralCheckpointIO\n\nfrom .accelerator import Accelerator\nfrom .mixed_precision import MixedPrecision, mixed_precision_factory\nfrom .plugin import Plugin\n\n__all__ = ['Booster']\n\n\nclass Booster:\n \"\"\"\n Booster is a high-level API for training neural networks. It provides a unified interface for\n training with different precision, accelerator, and plugin.\n\n Examples:\n >>> colossalai.launch(...)\n >>> plugin = GeminiPlugin(stage=3, ...)\n >>> booster = Booster(precision='fp16', plugin=plugin)\n >>>\n >>> model = GPT2()\n >>> optimizer = Adam(model.parameters())\n >>> dataloader = Dataloader(Dataset)\n >>> lr_scheduler = LinearWarmupScheduler()\n >>> criterion = GPTLMLoss()\n >>>\n >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n >>>\n >>> for epoch in range(max_epochs):\n >>> for input_ids, attention_mask in dataloader:\n >>> outputs = model(input_ids, attention_mask)\n >>> loss = criterion(outputs.logits, input_ids)\n >>> booster.backward(loss, optimizer)\n >>> optimizer.step()\n >>> lr_scheduler.step()\n >>> optimizer.zero_grad()\n\n\n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.\n If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.\n 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.\n plugin (Plugin): The plugin to run the training. 
Default: None.\n \"\"\"\n\n def __init__(self,\n device: str = 'cuda',\n mixed_precision: Union[MixedPrecision, str] = None,\n plugin: Optional[Plugin] = None) -> None:\n if plugin is not None:\n assert isinstance(\n plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'\n self.plugin = plugin\n\n # set accelerator\n if self.plugin and self.plugin.control_device():\n self.accelerator = None\n warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')\n else:\n self.accelerator = Accelerator(device)\n\n # set precision\n if self.plugin and self.plugin.control_precision():\n warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')\n self.mixed_precision = None\n elif mixed_precision is None:\n self.mixed_precision = None\n else:\n # validate and set precision\n if isinstance(mixed_precision, str):\n # the user will take the default arguments for amp training\n self.mixed_precision = mixed_precision_factory(mixed_precision)\n elif isinstance(mixed_precision, MixedPrecision):\n # the user can customize the arguments by passing the precision object\n self.mixed_precision = mixed_precision\n else:\n raise ValueError(\n f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'\n )\n\n if self.plugin is not None and self.plugin.control_checkpoint_io():\n self.checkpoint_io = self.plugin.get_checkpoint_io()\n else:\n self.checkpoint_io = GeneralCheckpointIO()\n\n def boost(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: Callable = None,\n dataloader: DataLoader = None,\n lr_scheduler: LRScheduler = None,\n ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:\n \"\"\"\n Boost the model, optimizer, criterion, lr_scheduler, and dataloader.\n\n Args:\n model (nn.Module): The model to be boosted.\n optimizer (Optimizer): The optimizer to be boosted.\n criterion (Callable): The criterion to be boosted.\n dataloader (DataLoader): The dataloader to be boosted.\n lr_scheduler (LRScheduler): The lr_scheduler to be boosted.\n \"\"\"\n # TODO(FrankLeeeee): consider multi-model and multi-optimizer case\n # TODO(FrankLeeeee): consider multi-dataloader case\n # transform model for mixed precision\n if self.plugin:\n model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(\n model, optimizer, criterion, dataloader, lr_scheduler)\n\n if self.plugin and not self.plugin.control_device():\n # transform model for accelerator\n model = self.accelerator.configure(model)\n\n if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):\n # transform model for mixed precision\n # when mixed_precision is specified and the plugin is not given or does not control the precision\n model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)\n\n return model, optimizer, criterion, dataloader, lr_scheduler\n\n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n\n def execute_pipeline(self,\n data_iter: Iterator,\n model: nn.Module,\n criterion: Callable[[torch.Tensor], torch.Tensor],\n optimizer: Optimizer,\n return_loss: bool = True,\n return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]:\n # TODO: implement this method\n # run pipeline forward backward pass\n # return loss or outputs if needed\n pass\n\n def 
no_sync(self, model: nn.Module) -> contextmanager:\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n\n def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):\n \"\"\"Load model from checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n strict (bool, optional): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Defaults to True.\n \"\"\"\n self.checkpoint_io.load_model(model, checkpoint, strict)\n\n def save_model(self,\n model: nn.Module,\n checkpoint: str,\n prefix: str = None,\n shard: bool = False,\n size_per_shard: int = 1024):\n \"\"\"Save model to checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n prefix (str, optional): A prefix added to parameter and buffer\n names to compose the keys in state_dict. Defaults to None.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)\n\n def load_optimizer(self, optimizer: Optimizer, checkpoint: str):\n \"\"\"Load optimizer from checkpoint.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n \"\"\"\n self.checkpoint_io.load_optimizer(optimizer, checkpoint)\n\n def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):\n \"\"\"Save optimizer to checkpoint.\n Warning: Saving sharded optimizer checkpoint is not supported yet.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)\n\n def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Save lr scheduler to checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. 
It must be a local file path.\n \"\"\"\n self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)\n\n def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Load lr scheduler from checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)\n", "path": "colossalai/booster/booster.py"}], "after_files": [{"content": "import warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\n\nfrom colossalai.checkpoint_io import GeneralCheckpointIO\n\nfrom .accelerator import Accelerator\nfrom .mixed_precision import MixedPrecision, mixed_precision_factory\nfrom .plugin import Plugin\n\n__all__ = ['Booster']\n\n\nclass Booster:\n \"\"\"\n Booster is a high-level API for training neural networks. It provides a unified interface for\n training with different precision, accelerator, and plugin.\n\n Examples:\n >>> colossalai.launch(...)\n >>> plugin = GeminiPlugin(stage=3, ...)\n >>> booster = Booster(precision='fp16', plugin=plugin)\n >>>\n >>> model = GPT2()\n >>> optimizer = Adam(model.parameters())\n >>> dataloader = Dataloader(Dataset)\n >>> lr_scheduler = LinearWarmupScheduler()\n >>> criterion = GPTLMLoss()\n >>>\n >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n >>>\n >>> for epoch in range(max_epochs):\n >>> for input_ids, attention_mask in dataloader:\n >>> outputs = model(input_ids, attention_mask)\n >>> loss = criterion(outputs.logits, input_ids)\n >>> booster.backward(loss, optimizer)\n >>> optimizer.step()\n >>> lr_scheduler.step()\n >>> optimizer.zero_grad()\n\n\n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.\n If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.\n 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.\n plugin (Plugin): The plugin to run the training. 
Default: None.\n \"\"\"\n\n def __init__(self,\n device: str = 'cuda',\n mixed_precision: Union[MixedPrecision, str] = None,\n plugin: Optional[Plugin] = None) -> None:\n if plugin is not None:\n assert isinstance(\n plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'\n self.plugin = plugin\n\n # set accelerator\n if self.plugin and self.plugin.control_device():\n self.accelerator = None\n warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')\n else:\n self.accelerator = Accelerator(device)\n\n # set precision\n if self.plugin and self.plugin.control_precision():\n warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')\n self.mixed_precision = None\n elif mixed_precision is None:\n self.mixed_precision = None\n else:\n # validate and set precision\n if isinstance(mixed_precision, str):\n # the user will take the default arguments for amp training\n self.mixed_precision = mixed_precision_factory(mixed_precision)\n elif isinstance(mixed_precision, MixedPrecision):\n # the user can customize the arguments by passing the precision object\n self.mixed_precision = mixed_precision\n else:\n raise ValueError(\n f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'\n )\n\n if self.plugin is not None and self.plugin.control_checkpoint_io():\n self.checkpoint_io = self.plugin.get_checkpoint_io()\n else:\n self.checkpoint_io = GeneralCheckpointIO()\n\n def boost(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: Callable = None,\n dataloader: DataLoader = None,\n lr_scheduler: LRScheduler = None,\n ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:\n \"\"\"\n Boost the model, optimizer, criterion, lr_scheduler, and dataloader.\n\n Args:\n model (nn.Module): The model to be boosted.\n optimizer (Optimizer): The optimizer to be boosted.\n criterion (Callable): The criterion to be boosted.\n dataloader (DataLoader): The dataloader to be boosted.\n lr_scheduler (LRScheduler): The lr_scheduler to be boosted.\n \"\"\"\n # TODO(FrankLeeeee): consider multi-model and multi-optimizer case\n # TODO(FrankLeeeee): consider multi-dataloader case\n # transform model for mixed precision\n if self.plugin:\n model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(\n model, optimizer, criterion, dataloader, lr_scheduler)\n\n if self.plugin and not self.plugin.control_device():\n # transform model for accelerator\n model = self.accelerator.configure(model)\n\n if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):\n # transform model for mixed precision\n # when mixed_precision is specified and the plugin is not given or does not control the precision\n model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)\n\n return model, optimizer, criterion, dataloader, lr_scheduler\n\n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n \"\"\"Backward pass.\n\n Args:\n loss (torch.Tensor): The loss to be backpropagated.\n optimizer (Optimizer): The optimizer to be updated.\n \"\"\"\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n\n def execute_pipeline(self,\n data_iter: Iterator,\n model: nn.Module,\n criterion: Callable[[torch.Tensor], torch.Tensor],\n optimizer: Optimizer,\n return_loss: bool = True,\n return_outputs: bool = False) -> 
Tuple[Optional[torch.Tensor], ...]:\n # TODO: implement this method\n # run pipeline forward backward pass\n # return loss or outputs if needed\n pass\n\n def no_sync(self, model: nn.Module) -> contextmanager:\n \"\"\"Context manager to disable gradient synchronization across DP process groups.\n\n Args:\n model (nn.Module): The model to be disabled gradient synchronization.\n\n Returns:\n contextmanager: Context to disable gradient synchronization.\n \"\"\"\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n\n def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):\n \"\"\"Load model from checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n strict (bool, optional): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Defaults to True.\n \"\"\"\n self.checkpoint_io.load_model(model, checkpoint, strict)\n\n def save_model(self,\n model: nn.Module,\n checkpoint: str,\n prefix: str = None,\n shard: bool = False,\n size_per_shard: int = 1024):\n \"\"\"Save model to checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n prefix (str, optional): A prefix added to parameter and buffer\n names to compose the keys in state_dict. Defaults to None.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)\n\n def load_optimizer(self, optimizer: Optimizer, checkpoint: str):\n \"\"\"Load optimizer from checkpoint.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n \"\"\"\n self.checkpoint_io.load_optimizer(optimizer, checkpoint)\n\n def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):\n \"\"\"Save optimizer to checkpoint.\n Warning: Saving sharded optimizer checkpoint is not supported yet.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. 
Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)\n\n def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Save lr scheduler to checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)\n\n def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Load lr scheduler from checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)\n", "path": "colossalai/booster/booster.py"}]} | 3,065 | 311 |
gh_patches_debug_6988 | rasdani/github-patches | git_diff | napari__napari-4264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'napari._vispy.layers.image' is not a package
## Activating experimental rendering features causes persistent ModuleNotFoundError
Recently, I discovered the two "experimental" features designed to enhance viewer performance with asynchronous rendering. After checking both options in Preferences (under Experimental), the GUI prompted a restart. When attempting to relaunch Napari, the following error is thrown: `ModuleNotFoundError: No module named 'napari._vispy.layers.image.experimental'; 'napari._vispy.layers.image' is not a package`. I tried recreating my virtual environment, installing a different version of Napari, using the `--no-cache-dir` flag with pip to ensure that the installation process did not use cached files...but every new installation of Napari now throws this error.
## To Reproduce
Steps to reproduce the behavior:
1. Check both experimental rendering options under File/Preferences/Experimental
2. Restart Napari
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_vispy/utils/visual.py`
Content:
```
1 from typing import Tuple
2
3 import numpy as np
4 from vispy.scene.widgets.viewbox import ViewBox
5
6 from ...layers import (
7 Image,
8 Labels,
9 Layer,
10 Points,
11 Shapes,
12 Surface,
13 Tracks,
14 Vectors,
15 )
16 from ...utils.config import async_octree
17 from ...utils.translations import trans
18 from ..layers.base import VispyBaseLayer
19 from ..layers.image import VispyImageLayer
20 from ..layers.labels import VispyLabelsLayer
21 from ..layers.points import VispyPointsLayer
22 from ..layers.shapes import VispyShapesLayer
23 from ..layers.surface import VispySurfaceLayer
24 from ..layers.tracks import VispyTracksLayer
25 from ..layers.vectors import VispyVectorsLayer
26
27 layer_to_visual = {
28 Image: VispyImageLayer,
29 Labels: VispyLabelsLayer,
30 Points: VispyPointsLayer,
31 Shapes: VispyShapesLayer,
32 Surface: VispySurfaceLayer,
33 Vectors: VispyVectorsLayer,
34 Tracks: VispyTracksLayer,
35 }
36
37
38 if async_octree:
39 from ..layers.image.experimental.octree_image import _OctreeImageBase
40 from .experimental.vispy_tiled_image_layer import VispyTiledImageLayer
41
42 # Insert _OctreeImageBase in front so it gets picked over plain Image.
43 new_mapping = {_OctreeImageBase: VispyTiledImageLayer}
44 new_mapping.update(layer_to_visual)
45 layer_to_visual = new_mapping
46
47
48 def create_vispy_visual(layer: Layer) -> VispyBaseLayer:
49 """Create vispy visual for a layer based on its layer type.
50
51 Parameters
52 ----------
53 layer : napari.layers._base_layer.Layer
54 Layer that needs its property widget created.
55
56 Returns
57 -------
58 visual : vispy.scene.visuals.VisualNode
59 Vispy visual node
60 """
61 for layer_type, visual_class in layer_to_visual.items():
62 if isinstance(layer, layer_type):
63 return visual_class(layer)
64
65 raise TypeError(
66 trans._(
67 'Could not find VispyLayer for layer of type {dtype}',
68 deferred=True,
69 dtype=type(layer),
70 )
71 )
72
73
74 def get_view_direction_in_scene_coordinates(
75 view: ViewBox,
76 ndim: int,
77 dims_displayed: Tuple[int],
78 ) -> np.ndarray:
79 """Calculate the unit vector pointing in the direction of the view.
80
81 This is only for 3D viewing, so it returns None when
82 len(dims_displayed) == 2.
83 Adapted From:
84 https://stackoverflow.com/questions/37877592/
85 get-view-direction-relative-to-scene-in-vispy/37882984
86
87 Parameters
88 ----------
89 view : vispy.scene.widgets.viewbox.ViewBox
90 The vispy view box object to get the view direction from.
91 ndim : int
92 The number of dimensions in the full nD dims model.
93 This is typically from viewer.dims.ndim
94 dims_displayed : Tuple[int]
95 The indices of the dims displayed in the viewer.
96 This is typically from viewer.dims.displayed.
97
98 Returns
99 -------
100 view_vector : np.ndarray
101 Unit vector in the direction of the view in scene coordinates.
102 Axes are ordered zyx. If the viewer is in 2D
103 (i.e., len(dims_displayed) == 2), view_vector is None.
104 """
105 # only return a vector when viewing in 3D
106 if len(dims_displayed) == 2:
107 return None
108
109 tform = view.scene.transform
110 w, h = view.canvas.size
111
112 # get a point at the center of the canvas
113 # (homogeneous screen coords)
114 screen_center = np.array([w / 2, h / 2, 0, 1])
115
116 # find a point just in front of the center point
117 # transform both to world coords and find the vector
118 d1 = np.array([0, 0, 1, 0])
119 point_in_front_of_screen_center = screen_center + d1
120 p1 = tform.imap(point_in_front_of_screen_center)
121 p0 = tform.imap(screen_center)
122 d2 = p1 - p0
123
124 # in 3D world coordinates
125 d3 = d2[0:3]
126 d4 = d3 / np.linalg.norm(d3)
127
128 # data are ordered xyz on vispy Volume
129 d4 = d4[[2, 1, 0]]
130 view_dir_world = np.zeros((ndim,))
131 for i, d in enumerate(dims_displayed):
132 view_dir_world[d] = d4[i]
133
134 return view_dir_world
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/_vispy/utils/visual.py b/napari/_vispy/utils/visual.py
--- a/napari/_vispy/utils/visual.py
+++ b/napari/_vispy/utils/visual.py
@@ -36,8 +36,8 @@
if async_octree:
- from ..layers.image.experimental.octree_image import _OctreeImageBase
- from .experimental.vispy_tiled_image_layer import VispyTiledImageLayer
+ from ...layers.image.experimental.octree_image import _OctreeImageBase
+ from ..experimental.vispy_tiled_image_layer import VispyTiledImageLayer
# Insert _OctreeImageBase in front so it gets picked over plain Image.
new_mapping = {_OctreeImageBase: VispyTiledImageLayer}
| {"golden_diff": "diff --git a/napari/_vispy/utils/visual.py b/napari/_vispy/utils/visual.py\n--- a/napari/_vispy/utils/visual.py\n+++ b/napari/_vispy/utils/visual.py\n@@ -36,8 +36,8 @@\n \n \n if async_octree:\n- from ..layers.image.experimental.octree_image import _OctreeImageBase\n- from .experimental.vispy_tiled_image_layer import VispyTiledImageLayer\n+ from ...layers.image.experimental.octree_image import _OctreeImageBase\n+ from ..experimental.vispy_tiled_image_layer import VispyTiledImageLayer\n \n # Insert _OctreeImageBase in front so it gets picked over plain Image.\n new_mapping = {_OctreeImageBase: VispyTiledImageLayer}\n", "issue": "'napari._vispy.layers.image' is not a package\n## Activating experimental rendering features causes persistent ModuleNotFoundError\r\n\r\nRecently, I discovered the two \"experimental\" features designed to enhance viewer performance with asynchronous rendering. After checking both options in Preferences (under Experimental), the GUI prompted a restart. When attempting to relaunch Napari, the following error is thrown: `ModuleNotFoundError: No module named 'napari._vispy.layers.image.experimental'; 'napari._vispy.layers.image' is not a package`. I tried recreating my virtual environment, installing a different version of Napari, using the `--no-cache-dir` flag with pip to ensure that the installation process did not use cached files...but every new installation of Napari now throws this error.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Check both experimental rendering options under File/Preferences/Experimental\r\n2. Restart Napari\r\n\n", "before_files": [{"content": "from typing import Tuple\n\nimport numpy as np\nfrom vispy.scene.widgets.viewbox import ViewBox\n\nfrom ...layers import (\n Image,\n Labels,\n Layer,\n Points,\n Shapes,\n Surface,\n Tracks,\n Vectors,\n)\nfrom ...utils.config import async_octree\nfrom ...utils.translations import trans\nfrom ..layers.base import VispyBaseLayer\nfrom ..layers.image import VispyImageLayer\nfrom ..layers.labels import VispyLabelsLayer\nfrom ..layers.points import VispyPointsLayer\nfrom ..layers.shapes import VispyShapesLayer\nfrom ..layers.surface import VispySurfaceLayer\nfrom ..layers.tracks import VispyTracksLayer\nfrom ..layers.vectors import VispyVectorsLayer\n\nlayer_to_visual = {\n Image: VispyImageLayer,\n Labels: VispyLabelsLayer,\n Points: VispyPointsLayer,\n Shapes: VispyShapesLayer,\n Surface: VispySurfaceLayer,\n Vectors: VispyVectorsLayer,\n Tracks: VispyTracksLayer,\n}\n\n\nif async_octree:\n from ..layers.image.experimental.octree_image import _OctreeImageBase\n from .experimental.vispy_tiled_image_layer import VispyTiledImageLayer\n\n # Insert _OctreeImageBase in front so it gets picked over plain Image.\n new_mapping = {_OctreeImageBase: VispyTiledImageLayer}\n new_mapping.update(layer_to_visual)\n layer_to_visual = new_mapping\n\n\ndef create_vispy_visual(layer: Layer) -> VispyBaseLayer:\n \"\"\"Create vispy visual for a layer based on its layer type.\n\n Parameters\n ----------\n layer : napari.layers._base_layer.Layer\n Layer that needs its property widget created.\n\n Returns\n -------\n visual : vispy.scene.visuals.VisualNode\n Vispy visual node\n \"\"\"\n for layer_type, visual_class in layer_to_visual.items():\n if isinstance(layer, layer_type):\n return visual_class(layer)\n\n raise TypeError(\n trans._(\n 'Could not find VispyLayer for layer of type {dtype}',\n deferred=True,\n dtype=type(layer),\n )\n )\n\n\ndef 
get_view_direction_in_scene_coordinates(\n view: ViewBox,\n ndim: int,\n dims_displayed: Tuple[int],\n) -> np.ndarray:\n \"\"\"Calculate the unit vector pointing in the direction of the view.\n\n This is only for 3D viewing, so it returns None when\n len(dims_displayed) == 2.\n Adapted From:\n https://stackoverflow.com/questions/37877592/\n get-view-direction-relative-to-scene-in-vispy/37882984\n\n Parameters\n ----------\n view : vispy.scene.widgets.viewbox.ViewBox\n The vispy view box object to get the view direction from.\n ndim : int\n The number of dimensions in the full nD dims model.\n This is typically from viewer.dims.ndim\n dims_displayed : Tuple[int]\n The indices of the dims displayed in the viewer.\n This is typically from viewer.dims.displayed.\n\n Returns\n -------\n view_vector : np.ndarray\n Unit vector in the direction of the view in scene coordinates.\n Axes are ordered zyx. If the viewer is in 2D\n (i.e., len(dims_displayed) == 2), view_vector is None.\n \"\"\"\n # only return a vector when viewing in 3D\n if len(dims_displayed) == 2:\n return None\n\n tform = view.scene.transform\n w, h = view.canvas.size\n\n # get a point at the center of the canvas\n # (homogeneous screen coords)\n screen_center = np.array([w / 2, h / 2, 0, 1])\n\n # find a point just in front of the center point\n # transform both to world coords and find the vector\n d1 = np.array([0, 0, 1, 0])\n point_in_front_of_screen_center = screen_center + d1\n p1 = tform.imap(point_in_front_of_screen_center)\n p0 = tform.imap(screen_center)\n d2 = p1 - p0\n\n # in 3D world coordinates\n d3 = d2[0:3]\n d4 = d3 / np.linalg.norm(d3)\n\n # data are ordered xyz on vispy Volume\n d4 = d4[[2, 1, 0]]\n view_dir_world = np.zeros((ndim,))\n for i, d in enumerate(dims_displayed):\n view_dir_world[d] = d4[i]\n\n return view_dir_world\n", "path": "napari/_vispy/utils/visual.py"}], "after_files": [{"content": "from typing import Tuple\n\nimport numpy as np\nfrom vispy.scene.widgets.viewbox import ViewBox\n\nfrom ...layers import (\n Image,\n Labels,\n Layer,\n Points,\n Shapes,\n Surface,\n Tracks,\n Vectors,\n)\nfrom ...utils.config import async_octree\nfrom ...utils.translations import trans\nfrom ..layers.base import VispyBaseLayer\nfrom ..layers.image import VispyImageLayer\nfrom ..layers.labels import VispyLabelsLayer\nfrom ..layers.points import VispyPointsLayer\nfrom ..layers.shapes import VispyShapesLayer\nfrom ..layers.surface import VispySurfaceLayer\nfrom ..layers.tracks import VispyTracksLayer\nfrom ..layers.vectors import VispyVectorsLayer\n\nlayer_to_visual = {\n Image: VispyImageLayer,\n Labels: VispyLabelsLayer,\n Points: VispyPointsLayer,\n Shapes: VispyShapesLayer,\n Surface: VispySurfaceLayer,\n Vectors: VispyVectorsLayer,\n Tracks: VispyTracksLayer,\n}\n\n\nif async_octree:\n from ...layers.image.experimental.octree_image import _OctreeImageBase\n from ..experimental.vispy_tiled_image_layer import VispyTiledImageLayer\n\n # Insert _OctreeImageBase in front so it gets picked over plain Image.\n new_mapping = {_OctreeImageBase: VispyTiledImageLayer}\n new_mapping.update(layer_to_visual)\n layer_to_visual = new_mapping\n\n\ndef create_vispy_visual(layer: Layer) -> VispyBaseLayer:\n \"\"\"Create vispy visual for a layer based on its layer type.\n\n Parameters\n ----------\n layer : napari.layers._base_layer.Layer\n Layer that needs its property widget created.\n\n Returns\n -------\n visual : vispy.scene.visuals.VisualNode\n Vispy visual node\n \"\"\"\n for layer_type, visual_class in 
layer_to_visual.items():\n if isinstance(layer, layer_type):\n return visual_class(layer)\n\n raise TypeError(\n trans._(\n 'Could not find VispyLayer for layer of type {dtype}',\n deferred=True,\n dtype=type(layer),\n )\n )\n\n\ndef get_view_direction_in_scene_coordinates(\n view: ViewBox,\n ndim: int,\n dims_displayed: Tuple[int],\n) -> np.ndarray:\n \"\"\"Calculate the unit vector pointing in the direction of the view.\n\n This is only for 3D viewing, so it returns None when\n len(dims_displayed) == 2.\n Adapted From:\n https://stackoverflow.com/questions/37877592/\n get-view-direction-relative-to-scene-in-vispy/37882984\n\n Parameters\n ----------\n view : vispy.scene.widgets.viewbox.ViewBox\n The vispy view box object to get the view direction from.\n ndim : int\n The number of dimensions in the full nD dims model.\n This is typically from viewer.dims.ndim\n dims_displayed : Tuple[int]\n The indices of the dims displayed in the viewer.\n This is typically from viewer.dims.displayed.\n\n Returns\n -------\n view_vector : np.ndarray\n Unit vector in the direction of the view in scene coordinates.\n Axes are ordered zyx. If the viewer is in 2D\n (i.e., len(dims_displayed) == 2), view_vector is None.\n \"\"\"\n # only return a vector when viewing in 3D\n if len(dims_displayed) == 2:\n return None\n\n tform = view.scene.transform\n w, h = view.canvas.size\n\n # get a point at the center of the canvas\n # (homogeneous screen coords)\n screen_center = np.array([w / 2, h / 2, 0, 1])\n\n # find a point just in front of the center point\n # transform both to world coords and find the vector\n d1 = np.array([0, 0, 1, 0])\n point_in_front_of_screen_center = screen_center + d1\n p1 = tform.imap(point_in_front_of_screen_center)\n p0 = tform.imap(screen_center)\n d2 = p1 - p0\n\n # in 3D world coordinates\n d3 = d2[0:3]\n d4 = d3 / np.linalg.norm(d3)\n\n # data are ordered xyz on vispy Volume\n d4 = d4[[2, 1, 0]]\n view_dir_world = np.zeros((ndim,))\n for i, d in enumerate(dims_displayed):\n view_dir_world[d] = d4[i]\n\n return view_dir_world\n", "path": "napari/_vispy/utils/visual.py"}]} | 1,767 | 179 |
gh_patches_debug_24454 | rasdani/github-patches | git_diff | deepchecks__deepchecks-399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception when running DataDuplicates check
To reproduce:
https://www.kaggle.com/itay94/notebookf8c78e84d7
```
---------------------------------------------------------------------------
MemoryError Traceback (most recent call last)
/tmp/ipykernel_34/3771541214.py in <module>
1 from deepchecks.checks import DataDuplicates
2
----> 3 DataDuplicates().run(ds_train)
/opt/conda/lib/python3.7/site-packages/deepchecks/base/check.py in wrapped(*args, **kwargs)
275 @wraps(func)
276 def wrapped(*args, **kwargs):
--> 277 result = func(*args, **kwargs)
278 if not isinstance(result, CheckResult):
279 raise DeepchecksValueError(f'Check {class_instance.name()} expected to return CheckResult bot got: '
/opt/conda/lib/python3.7/site-packages/deepchecks/checks/integrity/data_duplicates.py in run(self, dataset, model)
71 raise DeepchecksValueError('Dataset does not contain any data')
72
---> 73 group_unique_data = df[data_columns].groupby(data_columns, dropna=False).size()
74 n_unique = len(group_unique_data)
75
/opt/conda/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in size(self)
1834 result = result.rename("size").reset_index()
1835
-> 1836 return self._reindex_output(result, fill_value=0)
1837
1838 @final
/opt/conda/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in _reindex_output(self, output, fill_value)
3163 levels_list = [ping.group_index for ping in groupings]
3164 index, _ = MultiIndex.from_product(
-> 3165 levels_list, names=self.grouper.names
3166 ).sortlevel()
3167
/opt/conda/lib/python3.7/site-packages/pandas/core/indexes/multi.py in from_product(cls, iterables, sortorder, names)
618
619 # codes are all ndarrays, so cartesian_product is lossless
--> 620 codes = cartesian_product(codes)
621 return cls(levels, codes, sortorder=sortorder, names=names)
622
/opt/conda/lib/python3.7/site-packages/pandas/core/reshape/util.py in cartesian_product(X)
52 b = np.zeros_like(cumprodX)
53
---> 54 return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)]
55
56
/opt/conda/lib/python3.7/site-packages/pandas/core/reshape/util.py in <listcomp>(.0)
52 b = np.zeros_like(cumprodX)
53
---> 54 return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)]
55
56
<__array_function__ internals> in repeat(*args, **kwargs)
/opt/conda/lib/python3.7/site-packages/numpy/core/fromnumeric.py in repeat(a, repeats, axis)
477
478 """
--> 479 return _wrapfunc(a, 'repeat', repeats, axis=axis)
480
481
/opt/conda/lib/python3.7/site-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
56
57 try:
---> 58 return bound(*args, **kwds)
59 except TypeError:
60 # A TypeError occurs if the object does have such a method in its
MemoryError: Unable to allocate 589. PiB for an array with shape (331491645779374080,) and data type int16
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/checks/integrity/label_ambiguity.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """module contains Data Duplicates check."""
12 from typing import Union, List
13
14 import pandas as pd
15
16 from deepchecks import Dataset, ConditionResult
17 from deepchecks.base.check import CheckResult, SingleDatasetBaseCheck
18 from deepchecks.errors import DeepchecksValueError
19 from deepchecks.utils.metrics import task_type_validation, ModelType
20 from deepchecks.utils.strings import format_percent
21 from deepchecks.utils.typing import Hashable
22
23
24 __all__ = ['LabelAmbiguity']
25
26
27 class LabelAmbiguity(SingleDatasetBaseCheck):
28 """Find samples with multiple labels.
29
30 Args:
31 columns (Hashable, List[Hashable]):
32 List of columns to check, if none given checks
33 all columns Except ignored ones.
34 ignore_columns (Hashable, List[Hashable]):
35 List of columns to ignore, if none given checks
36 based on columns variable.
37 n_to_show (int):
38 number of most common ambiguous samples to show.
39 """
40
41 def __init__(
42 self,
43 columns: Union[Hashable, List[Hashable], None] = None,
44 ignore_columns: Union[Hashable, List[Hashable], None] = None,
45 n_to_show: int = 5
46 ):
47 super().__init__()
48 self.columns = columns
49 self.ignore_columns = ignore_columns
50 self.n_to_show = n_to_show
51
52 def run(self, dataset: Dataset, model=None) -> CheckResult:
53 """Run check.
54
55 Args:
56 dataset(Dataset): any dataset.
57 model (any): used to check task type (default: None)
58
59 Returns:
60 (CheckResult): percentage of ambiguous samples and display of the top n_to_show most ambiguous.
61 """
62 dataset: Dataset = Dataset.validate_dataset(dataset)
63 dataset = dataset.select(self.columns, self.ignore_columns)
64
65 if model:
66 task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])
67 elif dataset.label_type == 'regression_label':
68 raise DeepchecksValueError('Task type cannot be regression')
69
70 label_col = dataset.label_name
71
72 group_unique_data = dataset.data.groupby(dataset.features, dropna=False)
73 group_unique_labels = group_unique_data.nunique()[label_col]
74
75 num_ambiguous = 0
76 ambiguous_label_name = 'Observed Labels'
77 display = pd.DataFrame(columns=[ambiguous_label_name, *dataset.features])
78
79 for num_labels, group_data in sorted(zip(group_unique_labels, group_unique_data),
80 key=lambda x: x[0], reverse=True):
81 if num_labels == 1:
82 break
83
84 group_df = group_data[1]
85 sample_values = dict(group_df[dataset.features].iloc[0])
86 labels = tuple(group_df[label_col].unique())
87 n_data_sample = group_df.shape[0]
88 num_ambiguous += n_data_sample
89
90 display = display.append({ambiguous_label_name: labels, **sample_values}, ignore_index=True)
91
92 display = display.set_index(ambiguous_label_name)
93
94 explanation = ('Each row in the table shows an example of a data sample '
95 'and the it\'s observed labels as a found in the dataset.')
96
97 display = None if display.empty else [explanation, display.head(self.n_to_show)]
98
99 percent_ambiguous = num_ambiguous/dataset.n_samples
100
101 return CheckResult(value=percent_ambiguous, display=display)
102
103 def add_condition_ambiguous_sample_ratio_not_greater_than(self, max_ratio=0):
104 """Add condition - require samples with multiple labels to not be more than max_ratio.
105
106 Args:
107 max_ratio (float): Maximum ratio of samples with multiple labels.
108 """
109 def max_ratio_condition(result: float) -> ConditionResult:
110 if result > max_ratio:
111 return ConditionResult(False, f'Found {format_percent(result)} samples with multiple labels')
112 else:
113 return ConditionResult(True)
114
115 return self.add_condition(f'Ambiguous sample ratio is not greater than {format_percent(max_ratio)}',
116 max_ratio_condition)
117
```
Path: `deepchecks/checks/integrity/data_duplicates.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """module contains Data Duplicates check."""
12 from typing import Union, List
13
14 import numpy as np
15 import pandas as pd
16
17 from deepchecks import Dataset
18 from deepchecks.base.check import CheckResult, SingleDatasetBaseCheck, ConditionResult, ConditionCategory
19 from deepchecks.utils.dataframes import select_from_dataframe
20 from deepchecks.utils.strings import format_percent, format_list
21 from deepchecks.utils.validation import ensure_dataframe_type
22 from deepchecks.utils.typing import Hashable
23 from deepchecks.errors import DeepchecksValueError
24
25
26 __all__ = ['DataDuplicates']
27
28
29 class DataDuplicates(SingleDatasetBaseCheck):
30 """Search for duplicate data in dataset.
31
32 Args:
33 columns (Hashable, Iterable[Hashable]):
34 List of columns to check, if none given checks
35 all columns Except ignored ones.
36 ignore_columns (Hashable, Iterable[Hashable]):
37 List of columns to ignore, if none given checks
38 based on columns variable.
39 n_to_show (int):
40 number of most common duplicated samples to show.
41 """
42
43 def __init__(
44 self,
45 columns: Union[Hashable, List[Hashable], None] = None,
46 ignore_columns: Union[Hashable, List[Hashable], None] = None,
47 n_to_show: int = 5
48 ):
49 super().__init__()
50 self.columns = columns
51 self.ignore_columns = ignore_columns
52 self.n_to_show = n_to_show
53
54 def run(self, dataset: Dataset, model=None) -> CheckResult:
55 """Run check.
56
57 Args:
58 dataset (Dataset): any dataset.
59
60 Returns:
61 (CheckResult): percentage of duplicates and display of the top n_to_show most duplicated.
62 """
63 df: pd.DataFrame = ensure_dataframe_type(dataset)
64 df = select_from_dataframe(df, self.columns, self.ignore_columns)
65
66 data_columns = list(df.columns)
67
68 n_samples = df.shape[0]
69
70 if n_samples == 0:
71 raise DeepchecksValueError('Dataset does not contain any data')
72
73 group_unique_data = df[data_columns].groupby(data_columns, dropna=False).size()
74 n_unique = len(group_unique_data)
75
76 percent_duplicate = 1 - (1.0 * int(n_unique)) / (1.0 * int(n_samples))
77
78 if percent_duplicate > 0:
79 # patched for anonymous_series
80 is_anonymous_series = 0 in group_unique_data.keys().names
81 if is_anonymous_series:
82 new_name = str(group_unique_data.keys().names)
83 new_index = group_unique_data.keys()
84 new_index.names = [new_name if name == 0 else name for name in new_index.names]
85 group_unique_data = group_unique_data.reindex(new_index)
86 duplicates_counted = group_unique_data.reset_index().rename(columns={0: 'Number of Duplicates'})
87 if is_anonymous_series:
88 duplicates_counted.rename(columns={new_name: 0}, inplace=True)
89
90 most_duplicates = duplicates_counted[duplicates_counted['Number of Duplicates'] > 1]. \
91 nlargest(self.n_to_show, ['Number of Duplicates'])
92
93 indexes = []
94 for row in most_duplicates.iloc():
95 indexes.append(format_list(df.index[np.all(df == row[data_columns], axis=1)].to_list()))
96
97 most_duplicates['Instances'] = indexes
98
99 most_duplicates = most_duplicates.set_index(['Instances', 'Number of Duplicates'])
100
101 text = f'{format_percent(percent_duplicate)} of data samples are duplicates'
102 explanation = 'Each row in the table shows an example of duplicate data and the number of times it appears.'
103 display = [text, explanation, most_duplicates]
104
105 else:
106 display = None
107
108 return CheckResult(value=percent_duplicate, display=display)
109
110 def add_condition_ratio_not_greater_than(self, max_ratio: float = 0):
111 """Add condition - require duplicate ratio to not surpass max_ratio.
112
113 Args:
114 max_ratio (float): Maximum ratio of duplicates.
115 """
116 def max_ratio_condition(result: float) -> ConditionResult:
117 if result > max_ratio:
118 return ConditionResult(False, f'Found {format_percent(result)} duplicate data',
119 category=ConditionCategory.WARN)
120 else:
121 return ConditionResult(True)
122
123 return self.add_condition(f'Duplicate data is not greater than {format_percent(max_ratio)}',
124 max_ratio_condition)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepchecks/checks/integrity/data_duplicates.py b/deepchecks/checks/integrity/data_duplicates.py
--- a/deepchecks/checks/integrity/data_duplicates.py
+++ b/deepchecks/checks/integrity/data_duplicates.py
@@ -70,6 +70,11 @@
if n_samples == 0:
raise DeepchecksValueError('Dataset does not contain any data')
+ # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually
+ category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()
+ if category_columns:
+ df = df.astype({c: 'object' for c in category_columns})
+
group_unique_data = df[data_columns].groupby(data_columns, dropna=False).size()
n_unique = len(group_unique_data)
diff --git a/deepchecks/checks/integrity/label_ambiguity.py b/deepchecks/checks/integrity/label_ambiguity.py
--- a/deepchecks/checks/integrity/label_ambiguity.py
+++ b/deepchecks/checks/integrity/label_ambiguity.py
@@ -69,7 +69,13 @@
label_col = dataset.label_name
- group_unique_data = dataset.data.groupby(dataset.features, dropna=False)
+ # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually
+ df = dataset.data
+ category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()
+ if category_columns:
+ df = df.astype({c: 'object' for c in category_columns})
+
+ group_unique_data = df.groupby(dataset.features, dropna=False)
group_unique_labels = group_unique_data.nunique()[label_col]
num_ambiguous = 0
| {"golden_diff": "diff --git a/deepchecks/checks/integrity/data_duplicates.py b/deepchecks/checks/integrity/data_duplicates.py\n--- a/deepchecks/checks/integrity/data_duplicates.py\n+++ b/deepchecks/checks/integrity/data_duplicates.py\n@@ -70,6 +70,11 @@\n if n_samples == 0:\n raise DeepchecksValueError('Dataset does not contain any data')\n \n+ # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually\n+ category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()\n+ if category_columns:\n+ df = df.astype({c: 'object' for c in category_columns})\n+\n group_unique_data = df[data_columns].groupby(data_columns, dropna=False).size()\n n_unique = len(group_unique_data)\n \ndiff --git a/deepchecks/checks/integrity/label_ambiguity.py b/deepchecks/checks/integrity/label_ambiguity.py\n--- a/deepchecks/checks/integrity/label_ambiguity.py\n+++ b/deepchecks/checks/integrity/label_ambiguity.py\n@@ -69,7 +69,13 @@\n \n label_col = dataset.label_name\n \n- group_unique_data = dataset.data.groupby(dataset.features, dropna=False)\n+ # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually\n+ df = dataset.data\n+ category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()\n+ if category_columns:\n+ df = df.astype({c: 'object' for c in category_columns})\n+\n+ group_unique_data = df.groupby(dataset.features, dropna=False)\n group_unique_labels = group_unique_data.nunique()[label_col]\n \n num_ambiguous = 0\n", "issue": "Exception when running DataDuplicates check\nTo reproduce:\r\nhttps://www.kaggle.com/itay94/notebookf8c78e84d7\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nMemoryError Traceback (most recent call last)\r\n/tmp/ipykernel_34/3771541214.py in <module>\r\n 1 from deepchecks.checks import DataDuplicates\r\n 2 \r\n----> 3 DataDuplicates().run(ds_train)\r\n\r\n/opt/conda/lib/python3.7/site-packages/deepchecks/base/check.py in wrapped(*args, **kwargs)\r\n 275 @wraps(func)\r\n 276 def wrapped(*args, **kwargs):\r\n--> 277 result = func(*args, **kwargs)\r\n 278 if not isinstance(result, CheckResult):\r\n 279 raise DeepchecksValueError(f'Check {class_instance.name()} expected to return CheckResult bot got: '\r\n\r\n/opt/conda/lib/python3.7/site-packages/deepchecks/checks/integrity/data_duplicates.py in run(self, dataset, model)\r\n 71 raise DeepchecksValueError('Dataset does not contain any data')\r\n 72 \r\n---> 73 group_unique_data = df[data_columns].groupby(data_columns, dropna=False).size()\r\n 74 n_unique = len(group_unique_data)\r\n 75 \r\n\r\n/opt/conda/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in size(self)\r\n 1834 result = result.rename(\"size\").reset_index()\r\n 1835 \r\n-> 1836 return self._reindex_output(result, fill_value=0)\r\n 1837 \r\n 1838 @final\r\n\r\n/opt/conda/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in _reindex_output(self, output, fill_value)\r\n 3163 levels_list = [ping.group_index for ping in groupings]\r\n 3164 index, _ = MultiIndex.from_product(\r\n-> 3165 levels_list, names=self.grouper.names\r\n 3166 ).sortlevel()\r\n 3167 \r\n\r\n/opt/conda/lib/python3.7/site-packages/pandas/core/indexes/multi.py in from_product(cls, iterables, sortorder, names)\r\n 618 \r\n 619 # codes are all ndarrays, so cartesian_product is lossless\r\n--> 620 codes = cartesian_product(codes)\r\n 621 return cls(levels, codes, sortorder=sortorder, names=names)\r\n 622 
\r\n\r\n/opt/conda/lib/python3.7/site-packages/pandas/core/reshape/util.py in cartesian_product(X)\r\n 52 b = np.zeros_like(cumprodX)\r\n 53 \r\n---> 54 return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)]\r\n 55 \r\n 56 \r\n\r\n/opt/conda/lib/python3.7/site-packages/pandas/core/reshape/util.py in <listcomp>(.0)\r\n 52 b = np.zeros_like(cumprodX)\r\n 53 \r\n---> 54 return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)]\r\n 55 \r\n 56 \r\n\r\n<__array_function__ internals> in repeat(*args, **kwargs)\r\n\r\n/opt/conda/lib/python3.7/site-packages/numpy/core/fromnumeric.py in repeat(a, repeats, axis)\r\n 477 \r\n 478 \"\"\"\r\n--> 479 return _wrapfunc(a, 'repeat', repeats, axis=axis)\r\n 480 \r\n 481 \r\n\r\n/opt/conda/lib/python3.7/site-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)\r\n 56 \r\n 57 try:\r\n---> 58 return bound(*args, **kwds)\r\n 59 except TypeError:\r\n 60 # A TypeError occurs if the object does have such a method in its\r\n\r\nMemoryError: Unable to allocate 589. PiB for an array with shape (331491645779374080,) and data type int16\r\n```\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"module contains Data Duplicates check.\"\"\"\nfrom typing import Union, List\n\nimport pandas as pd\n\nfrom deepchecks import Dataset, ConditionResult\nfrom deepchecks.base.check import CheckResult, SingleDatasetBaseCheck\nfrom deepchecks.errors import DeepchecksValueError\nfrom deepchecks.utils.metrics import task_type_validation, ModelType\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.utils.typing import Hashable\n\n\n__all__ = ['LabelAmbiguity']\n\n\nclass LabelAmbiguity(SingleDatasetBaseCheck):\n \"\"\"Find samples with multiple labels.\n\n Args:\n columns (Hashable, List[Hashable]):\n List of columns to check, if none given checks\n all columns Except ignored ones.\n ignore_columns (Hashable, List[Hashable]):\n List of columns to ignore, if none given checks\n based on columns variable.\n n_to_show (int):\n number of most common ambiguous samples to show.\n \"\"\"\n\n def __init__(\n self,\n columns: Union[Hashable, List[Hashable], None] = None,\n ignore_columns: Union[Hashable, List[Hashable], None] = None,\n n_to_show: int = 5\n ):\n super().__init__()\n self.columns = columns\n self.ignore_columns = ignore_columns\n self.n_to_show = n_to_show\n\n def run(self, dataset: Dataset, model=None) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n dataset(Dataset): any dataset.\n model (any): used to check task type (default: None)\n\n Returns:\n (CheckResult): percentage of ambiguous samples and display of the top n_to_show most ambiguous.\n \"\"\"\n dataset: Dataset = Dataset.validate_dataset(dataset)\n dataset = dataset.select(self.columns, self.ignore_columns)\n\n if model:\n task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])\n elif dataset.label_type == 'regression_label':\n raise DeepchecksValueError('Task type cannot be regression')\n\n label_col = 
dataset.label_name\n\n group_unique_data = dataset.data.groupby(dataset.features, dropna=False)\n group_unique_labels = group_unique_data.nunique()[label_col]\n\n num_ambiguous = 0\n ambiguous_label_name = 'Observed Labels'\n display = pd.DataFrame(columns=[ambiguous_label_name, *dataset.features])\n\n for num_labels, group_data in sorted(zip(group_unique_labels, group_unique_data),\n key=lambda x: x[0], reverse=True):\n if num_labels == 1:\n break\n\n group_df = group_data[1]\n sample_values = dict(group_df[dataset.features].iloc[0])\n labels = tuple(group_df[label_col].unique())\n n_data_sample = group_df.shape[0]\n num_ambiguous += n_data_sample\n\n display = display.append({ambiguous_label_name: labels, **sample_values}, ignore_index=True)\n\n display = display.set_index(ambiguous_label_name)\n\n explanation = ('Each row in the table shows an example of a data sample '\n 'and the it\\'s observed labels as a found in the dataset.')\n\n display = None if display.empty else [explanation, display.head(self.n_to_show)]\n\n percent_ambiguous = num_ambiguous/dataset.n_samples\n\n return CheckResult(value=percent_ambiguous, display=display)\n\n def add_condition_ambiguous_sample_ratio_not_greater_than(self, max_ratio=0):\n \"\"\"Add condition - require samples with multiple labels to not be more than max_ratio.\n\n Args:\n max_ratio (float): Maximum ratio of samples with multiple labels.\n \"\"\"\n def max_ratio_condition(result: float) -> ConditionResult:\n if result > max_ratio:\n return ConditionResult(False, f'Found {format_percent(result)} samples with multiple labels')\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Ambiguous sample ratio is not greater than {format_percent(max_ratio)}',\n max_ratio_condition)\n", "path": "deepchecks/checks/integrity/label_ambiguity.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"module contains Data Duplicates check.\"\"\"\nfrom typing import Union, List\n\nimport numpy as np\nimport pandas as pd\n\nfrom deepchecks import Dataset\nfrom deepchecks.base.check import CheckResult, SingleDatasetBaseCheck, ConditionResult, ConditionCategory\nfrom deepchecks.utils.dataframes import select_from_dataframe\nfrom deepchecks.utils.strings import format_percent, format_list\nfrom deepchecks.utils.validation import ensure_dataframe_type\nfrom deepchecks.utils.typing import Hashable\nfrom deepchecks.errors import DeepchecksValueError\n\n\n__all__ = ['DataDuplicates']\n\n\nclass DataDuplicates(SingleDatasetBaseCheck):\n \"\"\"Search for duplicate data in dataset.\n\n Args:\n columns (Hashable, Iterable[Hashable]):\n List of columns to check, if none given checks\n all columns Except ignored ones.\n ignore_columns (Hashable, Iterable[Hashable]):\n List of columns to ignore, if none given checks\n based on columns variable.\n n_to_show (int):\n number of most common duplicated samples to show.\n \"\"\"\n\n def __init__(\n self,\n columns: Union[Hashable, List[Hashable], None] = None,\n ignore_columns: Union[Hashable, List[Hashable], None] = None,\n n_to_show: int = 5\n ):\n super().__init__()\n self.columns = columns\n self.ignore_columns = ignore_columns\n self.n_to_show = n_to_show\n\n def run(self, dataset: Dataset, model=None) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n dataset (Dataset): any dataset.\n\n Returns:\n (CheckResult): percentage of duplicates and display of the top n_to_show most duplicated.\n \"\"\"\n df: pd.DataFrame = ensure_dataframe_type(dataset)\n df = select_from_dataframe(df, self.columns, self.ignore_columns)\n\n data_columns = list(df.columns)\n\n n_samples = df.shape[0]\n\n if n_samples == 0:\n raise DeepchecksValueError('Dataset does not contain any data')\n\n group_unique_data = df[data_columns].groupby(data_columns, dropna=False).size()\n n_unique = len(group_unique_data)\n\n percent_duplicate = 1 - (1.0 * int(n_unique)) / (1.0 * int(n_samples))\n\n if percent_duplicate > 0:\n # patched for anonymous_series\n is_anonymous_series = 0 in group_unique_data.keys().names\n if is_anonymous_series:\n new_name = str(group_unique_data.keys().names)\n new_index = group_unique_data.keys()\n new_index.names = [new_name if name == 0 else name for name in new_index.names]\n group_unique_data = group_unique_data.reindex(new_index)\n duplicates_counted = group_unique_data.reset_index().rename(columns={0: 'Number of Duplicates'})\n if is_anonymous_series:\n duplicates_counted.rename(columns={new_name: 0}, inplace=True)\n\n most_duplicates = duplicates_counted[duplicates_counted['Number of Duplicates'] > 1]. 
\\\n nlargest(self.n_to_show, ['Number of Duplicates'])\n\n indexes = []\n for row in most_duplicates.iloc():\n indexes.append(format_list(df.index[np.all(df == row[data_columns], axis=1)].to_list()))\n\n most_duplicates['Instances'] = indexes\n\n most_duplicates = most_duplicates.set_index(['Instances', 'Number of Duplicates'])\n\n text = f'{format_percent(percent_duplicate)} of data samples are duplicates'\n explanation = 'Each row in the table shows an example of duplicate data and the number of times it appears.'\n display = [text, explanation, most_duplicates]\n\n else:\n display = None\n\n return CheckResult(value=percent_duplicate, display=display)\n\n def add_condition_ratio_not_greater_than(self, max_ratio: float = 0):\n \"\"\"Add condition - require duplicate ratio to not surpass max_ratio.\n\n Args:\n max_ratio (float): Maximum ratio of duplicates.\n \"\"\"\n def max_ratio_condition(result: float) -> ConditionResult:\n if result > max_ratio:\n return ConditionResult(False, f'Found {format_percent(result)} duplicate data',\n category=ConditionCategory.WARN)\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Duplicate data is not greater than {format_percent(max_ratio)}',\n max_ratio_condition)\n", "path": "deepchecks/checks/integrity/data_duplicates.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"module contains Data Duplicates check.\"\"\"\nfrom typing import Union, List\n\nimport pandas as pd\n\nfrom deepchecks import Dataset, ConditionResult\nfrom deepchecks.base.check import CheckResult, SingleDatasetBaseCheck\nfrom deepchecks.errors import DeepchecksValueError\nfrom deepchecks.utils.metrics import task_type_validation, ModelType\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.utils.typing import Hashable\n\n\n__all__ = ['LabelAmbiguity']\n\n\nclass LabelAmbiguity(SingleDatasetBaseCheck):\n \"\"\"Find samples with multiple labels.\n\n Args:\n columns (Hashable, List[Hashable]):\n List of columns to check, if none given checks\n all columns Except ignored ones.\n ignore_columns (Hashable, List[Hashable]):\n List of columns to ignore, if none given checks\n based on columns variable.\n n_to_show (int):\n number of most common ambiguous samples to show.\n \"\"\"\n\n def __init__(\n self,\n columns: Union[Hashable, List[Hashable], None] = None,\n ignore_columns: Union[Hashable, List[Hashable], None] = None,\n n_to_show: int = 5\n ):\n super().__init__()\n self.columns = columns\n self.ignore_columns = ignore_columns\n self.n_to_show = n_to_show\n\n def run(self, dataset: Dataset, model=None) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n dataset(Dataset): any dataset.\n model (any): used to check task type (default: None)\n\n Returns:\n (CheckResult): percentage of ambiguous samples and display of the top n_to_show most ambiguous.\n \"\"\"\n dataset: Dataset = Dataset.validate_dataset(dataset)\n dataset = dataset.select(self.columns, self.ignore_columns)\n\n if model:\n task_type_validation(model, dataset, [ModelType.MULTICLASS, 
ModelType.BINARY])\n elif dataset.label_type == 'regression_label':\n raise DeepchecksValueError('Task type cannot be regression')\n\n label_col = dataset.label_name\n\n # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually\n df = dataset.data\n category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()\n if category_columns:\n df = df.astype({c: 'object' for c in category_columns})\n\n group_unique_data = df.groupby(dataset.features, dropna=False)\n group_unique_labels = group_unique_data.nunique()[label_col]\n\n num_ambiguous = 0\n ambiguous_label_name = 'Observed Labels'\n display = pd.DataFrame(columns=[ambiguous_label_name, *dataset.features])\n\n for num_labels, group_data in sorted(zip(group_unique_labels, group_unique_data),\n key=lambda x: x[0], reverse=True):\n if num_labels == 1:\n break\n\n group_df = group_data[1]\n sample_values = dict(group_df[dataset.features].iloc[0])\n labels = tuple(group_df[label_col].unique())\n n_data_sample = group_df.shape[0]\n num_ambiguous += n_data_sample\n\n display = display.append({ambiguous_label_name: labels, **sample_values}, ignore_index=True)\n\n display = display.set_index(ambiguous_label_name)\n\n explanation = ('Each row in the table shows an example of a data sample '\n 'and the it\\'s observed labels as a found in the dataset.')\n\n display = None if display.empty else [explanation, display.head(self.n_to_show)]\n\n percent_ambiguous = num_ambiguous/dataset.n_samples\n\n return CheckResult(value=percent_ambiguous, display=display)\n\n def add_condition_ambiguous_sample_ratio_not_greater_than(self, max_ratio=0):\n \"\"\"Add condition - require samples with multiple labels to not be more than max_ratio.\n\n Args:\n max_ratio (float): Maximum ratio of samples with multiple labels.\n \"\"\"\n def max_ratio_condition(result: float) -> ConditionResult:\n if result > max_ratio:\n return ConditionResult(False, f'Found {format_percent(result)} samples with multiple labels')\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Ambiguous sample ratio is not greater than {format_percent(max_ratio)}',\n max_ratio_condition)\n", "path": "deepchecks/checks/integrity/label_ambiguity.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"module contains Data Duplicates check.\"\"\"\nfrom typing import Union, List\n\nimport numpy as np\nimport pandas as pd\n\nfrom deepchecks import Dataset\nfrom deepchecks.base.check import CheckResult, SingleDatasetBaseCheck, ConditionResult, ConditionCategory\nfrom deepchecks.utils.dataframes import select_from_dataframe\nfrom deepchecks.utils.strings import format_percent, format_list\nfrom deepchecks.utils.validation import ensure_dataframe_type\nfrom deepchecks.utils.typing import Hashable\nfrom deepchecks.errors import DeepchecksValueError\n\n\n__all__ = ['DataDuplicates']\n\n\nclass DataDuplicates(SingleDatasetBaseCheck):\n \"\"\"Search for duplicate data in dataset.\n\n Args:\n columns (Hashable, Iterable[Hashable]):\n List of columns to check, if none given checks\n all columns Except ignored ones.\n ignore_columns (Hashable, Iterable[Hashable]):\n List of columns to ignore, if none given checks\n based on columns variable.\n n_to_show (int):\n number of most common duplicated samples to show.\n \"\"\"\n\n def __init__(\n self,\n columns: Union[Hashable, List[Hashable], None] = None,\n ignore_columns: Union[Hashable, List[Hashable], None] = None,\n n_to_show: int = 5\n ):\n super().__init__()\n self.columns = columns\n self.ignore_columns = ignore_columns\n self.n_to_show = n_to_show\n\n def run(self, dataset: Dataset, model=None) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n dataset (Dataset): any dataset.\n\n Returns:\n (CheckResult): percentage of duplicates and display of the top n_to_show most duplicated.\n \"\"\"\n df: pd.DataFrame = ensure_dataframe_type(dataset)\n df = select_from_dataframe(df, self.columns, self.ignore_columns)\n\n data_columns = list(df.columns)\n\n n_samples = df.shape[0]\n\n if n_samples == 0:\n raise DeepchecksValueError('Dataset does not contain any data')\n\n # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually\n category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()\n if category_columns:\n df = df.astype({c: 'object' for c in category_columns})\n\n group_unique_data = df[data_columns].groupby(data_columns, dropna=False).size()\n n_unique = len(group_unique_data)\n\n percent_duplicate = 1 - (1.0 * int(n_unique)) / (1.0 * int(n_samples))\n\n if percent_duplicate > 0:\n # patched for anonymous_series\n is_anonymous_series = 0 in group_unique_data.keys().names\n if is_anonymous_series:\n new_name = str(group_unique_data.keys().names)\n new_index = group_unique_data.keys()\n new_index.names = [new_name if name == 0 else name for name in new_index.names]\n group_unique_data = group_unique_data.reindex(new_index)\n duplicates_counted = group_unique_data.reset_index().rename(columns={0: 'Number of Duplicates'})\n if is_anonymous_series:\n duplicates_counted.rename(columns={new_name: 0}, inplace=True)\n\n most_duplicates = duplicates_counted[duplicates_counted['Number of Duplicates'] > 1]. 
\\\n nlargest(self.n_to_show, ['Number of Duplicates'])\n\n indexes = []\n for row in most_duplicates.iloc():\n indexes.append(format_list(df.index[np.all(df == row[data_columns], axis=1)].to_list()))\n\n most_duplicates['Instances'] = indexes\n\n most_duplicates = most_duplicates.set_index(['Instances', 'Number of Duplicates'])\n\n text = f'{format_percent(percent_duplicate)} of data samples are duplicates'\n explanation = 'Each row in the table shows an example of duplicate data and the number of times it appears.'\n display = [text, explanation, most_duplicates]\n\n else:\n display = None\n\n return CheckResult(value=percent_duplicate, display=display)\n\n def add_condition_ratio_not_greater_than(self, max_ratio: float = 0):\n \"\"\"Add condition - require duplicate ratio to not surpass max_ratio.\n\n Args:\n max_ratio (float): Maximum ratio of duplicates.\n \"\"\"\n def max_ratio_condition(result: float) -> ConditionResult:\n if result > max_ratio:\n return ConditionResult(False, f'Found {format_percent(result)} duplicate data',\n category=ConditionCategory.WARN)\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Duplicate data is not greater than {format_percent(max_ratio)}',\n max_ratio_condition)\n", "path": "deepchecks/checks/integrity/data_duplicates.py"}]} | 3,743 | 407 |
gh_patches_debug_24997 | rasdani/github-patches | git_diff | conan-io__conan-center-index-6597 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] lcms/2.11 static library compiled with LTO (Visual Studio 2019)
lcms built with Visual Studio is compiled with LTO/whole-program optimization ([`/GL`](https://docs.microsoft.com/en-us/cpp/build/reference/gl-whole-program-optimization?view=msvc-160)) without an option to disable it.
The following warning is shown when linking an executable using lcms:
```
lcms2_static.lib(cmsio0.obj) : MSIL .netmodule or module compiled with /GL found; restarting link with /LTCG; add /LTCG to the link command line to improve linker performance
```
A library should be compiled with LTO only when `/GL` is added to `CFLAGS`/`CXXFLAGS`.
I think the correct course of action is to patch lcms so that it does not add the `/GL` compile flag.
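For illustration, a minimal sketch of how the recipe's MSBuild step could key whole-program optimization off the environment rather than the project defaults (assuming conans' `MSBuild.build` accepts a `properties` mapping; `_wants_lto` is only an illustrative helper name):

```python
import re
from conans import tools

def _wants_lto():
    # Treat a standalone /GL (or -GL) flag in CFLAGS as an explicit opt-in
    # to whole-program optimization; anything else disables it.
    return bool(re.search(r"(^| )[/-]GL($| )", tools.get_env("CFLAGS", "")))

# The solution build would then pass the property explicitly, e.g.:
# msbuild.build("lcms2.sln", targets=[target],
#               properties={"WholeProgramOptimization": "true" if _wants_lto() else "false"})
```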
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **lcms/2.11**
* Operating System+version: **Windows**
* Compiler+version: **Visual Studio 2019**
* Python version: **Python 3.9.6**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os=Windows
os_build=Windows
arch=x86_64
arch_build=x86_64
compiler=Visual Studio
compiler.version=16
build_type=Release
[build_requires]
[options]
[env]
```
### Steps to reproduce (Include if Applicable)
Build a simple executable and link with lcms
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/lcms/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild
2 from conans.tools import Version
3 import os
4 import shutil
5
6 required_conan_version = ">=1.33.0"
7
8
9 class LcmsConan(ConanFile):
10 name = "lcms"
11 url = "https://github.com/conan-io/conan-center-index"
12 description = "A free, open source, CMM engine."
13 license = "MIT"
14 homepage = "https://github.com/mm2/Little-CMS"
15 topics = ("lcms", "cmm", "icc", "cmm-engine")
16 settings = "os", "arch", "compiler", "build_type"
17 options = {
18 "shared": [True, False],
19 "fPIC": [True, False],
20 }
21 default_options = {
22 "shared": False,
23 "fPIC": True,
24 }
25
26 _autotools = None
27
28 @property
29 def _source_subfolder(self):
30 return "source_subfolder"
31
32 @property
33 def _settings_build(self):
34 return getattr(self, "settings_build", self.settings)
35
36 def config_options(self):
37 if self.settings.os == "Windows":
38 del self.options.fPIC
39
40 def configure(self):
41 if self.options.shared:
42 del self.options.fPIC
43 del self.settings.compiler.libcxx
44 del self.settings.compiler.cppstd
45
46 def build_requirements(self):
47 if self.settings.compiler != "Visual Studio":
48 self.build_requires("gnu-config/cci.20201022")
49 if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
50 self.build_requires("msys2/cci.latest")
51
52 def source(self):
53 tools.get(**self.conan_data["sources"][self.version],
54 destination=self._source_subfolder, strip_root=True)
55
56 def _patch_sources(self):
57 if self.settings.compiler == "Visual Studio" and Version(self.settings.compiler.version) >= "14":
58 # since VS2015 vsnprintf is built-in
59 path = os.path.join(self._source_subfolder, "src", "lcms2_internal.h")
60 tools.replace_in_file(path, "# define vsnprintf _vsnprintf", "")
61 if self.settings.os == "Android" and tools.os_info.is_windows:
62 # remove escape for quotation marks, to make ndk on windows happy
63 tools.replace_in_file(os.path.join(self._source_subfolder, "configure"),
64 "s/[ `~#$^&*(){}\\\\|;'\\\''\"<>?]/\\\\&/g",
65 "s/[ `~#$^&*(){}\\\\|;<>?]/\\\\&/g")
66
67 def _build_visual_studio(self):
68 if tools.Version(self.version) <= "2.11":
69 vc_sln_subdir = "VC2013"
70 else:
71 vc_sln_subdir = "VC2015"
72 with tools.chdir(os.path.join(self._source_subfolder, "Projects", vc_sln_subdir )):
73 target = "lcms2_DLL" if self.options.shared else "lcms2_static"
74 upgrade_project = Version(self.settings.compiler.version) > "12"
75 # run build
76 msbuild = MSBuild(self)
77 msbuild.build("lcms2.sln", targets=[target], platforms={"x86": "Win32"}, upgrade_project=upgrade_project)
78
79 def _configure_autotools(self):
80 if self._autotools:
81 return self._autotools
82 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
83 yes_no = lambda v: "yes" if v else "no"
84 args = [
85 "--enable-shared={}".format(yes_no(self.options.shared)),
86 "--enable-static={}".format(yes_no(not self.options.shared)),
87 "--without-tiff",
88 "--without-jpeg",
89 ]
90 self._autotools.configure(args=args, configure_dir=self._source_subfolder)
91 return self._autotools
92
93 @property
94 def _user_info_build(self):
95 # If using the experimental feature with different context for host and
96 # build, the 'user_info' attributes of the 'build_requires' packages
97 # will be located into the 'user_info_build' object. In other cases they
98 # will be located into the 'deps_user_info' object.
99 return getattr(self, "user_info_build", None) or self.deps_user_info
100
101 def build(self):
102 self._patch_sources()
103 if self.settings.compiler == "Visual Studio":
104 self._build_visual_studio()
105 else:
106 shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB,
107 os.path.join(self._source_subfolder, "config.sub"))
108 shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS,
109 os.path.join(self._source_subfolder, "config.guess"))
110 autotools = self._configure_autotools()
111 autotools.make()
112
113 def package(self):
114 self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
115 if self.settings.compiler == "Visual Studio":
116 self.copy(pattern="*.h", src=os.path.join(self._source_subfolder, "include"), dst="include", keep_path=True)
117 if self.options.shared:
118 self.copy(pattern="*.lib", src=os.path.join(self._source_subfolder, "bin"), dst="lib", keep_path=False)
119 self.copy(pattern="*.dll", src=os.path.join(self._source_subfolder, "bin"), dst="bin", keep_path=False)
120 else:
121 self.copy(pattern="*.lib", src=os.path.join(self._source_subfolder, "Lib", "MS"), dst="lib",
122 keep_path=False)
123 else:
124 autotools = self._configure_autotools()
125 autotools.install()
126 # remove entire share directory
127 tools.rmdir(os.path.join(self.package_folder, "share"))
128 # remove pkgconfig
129 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
130 # remove la files
131 la = os.path.join(self.package_folder, "lib", "liblcms2.la")
132 if os.path.isfile(la):
133 os.unlink(la)
134 # remove binaries
135 for bin_program in ["tificc", "linkicc", "transicc", "psicc", "jpgicc"]:
136 for ext in ["", ".exe"]:
137 try:
138 os.remove(os.path.join(self.package_folder, "bin", bin_program + ext))
139 except:
140 pass
141
142 def package_info(self):
143 if self.settings.compiler == "Visual Studio":
144 self.cpp_info.libs = ["lcms2" if self.options.shared else "lcms2_static"]
145 if self.options.shared:
146 self.cpp_info.defines.append("CMS_DLL")
147 else:
148 self.cpp_info.libs = ["lcms2"]
149 self.cpp_info.names["pkg_config"] = "lcms2"
150 if self.settings.os in ("FreeBSD", "Linux"):
151 self.cpp_info.system_libs.append("m")
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/lcms/all/conanfile.py b/recipes/lcms/all/conanfile.py
--- a/recipes/lcms/all/conanfile.py
+++ b/recipes/lcms/all/conanfile.py
@@ -1,6 +1,7 @@
from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild
from conans.tools import Version
import os
+import re
import shutil
required_conan_version = ">=1.33.0"
@@ -72,9 +73,13 @@
with tools.chdir(os.path.join(self._source_subfolder, "Projects", vc_sln_subdir )):
target = "lcms2_DLL" if self.options.shared else "lcms2_static"
upgrade_project = Version(self.settings.compiler.version) > "12"
+ properties = {
+ # Enable LTO when CFLAGS contains -GL
+ "WholeProgramOptimization": "true" if any(re.finditer("(^| )[/-]GL($| )", tools.get_env("CFLAGS", ""))) else "false",
+ }
# run build
msbuild = MSBuild(self)
- msbuild.build("lcms2.sln", targets=[target], platforms={"x86": "Win32"}, upgrade_project=upgrade_project)
+ msbuild.build("lcms2.sln", targets=[target], platforms={"x86": "Win32"}, upgrade_project=upgrade_project, properties=properties)
def _configure_autotools(self):
if self._autotools:
| {"golden_diff": "diff --git a/recipes/lcms/all/conanfile.py b/recipes/lcms/all/conanfile.py\n--- a/recipes/lcms/all/conanfile.py\n+++ b/recipes/lcms/all/conanfile.py\n@@ -1,6 +1,7 @@\n from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild\n from conans.tools import Version\n import os\n+import re\n import shutil\n \n required_conan_version = \">=1.33.0\"\n@@ -72,9 +73,13 @@\n with tools.chdir(os.path.join(self._source_subfolder, \"Projects\", vc_sln_subdir )):\n target = \"lcms2_DLL\" if self.options.shared else \"lcms2_static\"\n upgrade_project = Version(self.settings.compiler.version) > \"12\"\n+ properties = {\n+ # Enable LTO when CFLAGS contains -GL\n+ \"WholeProgramOptimization\": \"true\" if any(re.finditer(\"(^| )[/-]GL($| )\", tools.get_env(\"CFLAGS\", \"\"))) else \"false\",\n+ }\n # run build\n msbuild = MSBuild(self)\n- msbuild.build(\"lcms2.sln\", targets=[target], platforms={\"x86\": \"Win32\"}, upgrade_project=upgrade_project)\n+ msbuild.build(\"lcms2.sln\", targets=[target], platforms={\"x86\": \"Win32\"}, upgrade_project=upgrade_project, properties=properties)\n \n def _configure_autotools(self):\n if self._autotools:\n", "issue": "[package] lcms/2.11 static library compiled with LTO (Visual Studio 2019)\nlcms built with Visual Studio is compiled with LTO/program optimization ([`/GL`](https://docs.microsoft.com/en-us/cpp/build/reference/gl-whole-program-optimization?view=msvc-160)) without an option to disable it.\r\nThe following warning is shown when linking an executable using lcms:\r\n```\r\nlcms2_static.lib(cmsio0.obj) : MSIL .netmodule or module compiled with /GL found; restarting link w\r\n ith /LTCG; add /LTCG to the link command line to improve linker performance\r\n```\r\nA library should be compiled with LTO only when `/GL` is added to `CFLAGS`/`CXXFLAGS`.\r\n\r\nI think the correct way of action is to patch lcms to remove adding the `/GL` compile flag.\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **lcms/2.11**\r\n * Operating System+version: **Windows**\r\n * Compiler+version: **Visual Studio 2019**\r\n * Python version: **Python 3.9.6**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Windows\r\nos_build=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\nbuild_type=Release\r\n[build_requires]\r\n[options]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\nBuild a simple executable and link with lcms\r\n\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild\nfrom conans.tools import Version\nimport os\nimport shutil\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LcmsConan(ConanFile):\n name = \"lcms\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"A free, open source, CMM engine.\"\n license = \"MIT\"\n homepage = \"https://github.com/mm2/Little-CMS\"\n topics = (\"lcms\", \"cmm\", \"icc\", \"cmm-engine\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def 
config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n if self.settings.compiler != \"Visual Studio\":\n self.build_requires(\"gnu-config/cci.20201022\")\n if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/cci.latest\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _patch_sources(self):\n if self.settings.compiler == \"Visual Studio\" and Version(self.settings.compiler.version) >= \"14\":\n # since VS2015 vsnprintf is built-in\n path = os.path.join(self._source_subfolder, \"src\", \"lcms2_internal.h\")\n tools.replace_in_file(path, \"# define vsnprintf _vsnprintf\", \"\")\n if self.settings.os == \"Android\" and tools.os_info.is_windows:\n # remove escape for quotation marks, to make ndk on windows happy\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"),\n \"s/[\t `~#$^&*(){}\\\\\\\\|;'\\\\\\''\\\"<>?]/\\\\\\\\&/g\",\n \"s/[\t `~#$^&*(){}\\\\\\\\|;<>?]/\\\\\\\\&/g\")\n\n def _build_visual_studio(self):\n if tools.Version(self.version) <= \"2.11\":\n vc_sln_subdir = \"VC2013\"\n else:\n vc_sln_subdir = \"VC2015\"\n with tools.chdir(os.path.join(self._source_subfolder, \"Projects\", vc_sln_subdir )):\n target = \"lcms2_DLL\" if self.options.shared else \"lcms2_static\"\n upgrade_project = Version(self.settings.compiler.version) > \"12\"\n # run build\n msbuild = MSBuild(self)\n msbuild.build(\"lcms2.sln\", targets=[target], platforms={\"x86\": \"Win32\"}, upgrade_project=upgrade_project)\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n yes_no = lambda v: \"yes\" if v else \"no\"\n args = [\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n \"--without-tiff\",\n \"--without-jpeg\",\n ]\n self._autotools.configure(args=args, configure_dir=self._source_subfolder)\n return self._autotools\n\n @property\n def _user_info_build(self):\n # If using the experimental feature with different context for host and\n # build, the 'user_info' attributes of the 'build_requires' packages\n # will be located into the 'user_info_build' object. 
In other cases they\n # will be located into the 'deps_user_info' object.\n return getattr(self, \"user_info_build\", None) or self.deps_user_info\n\n def build(self):\n self._patch_sources()\n if self.settings.compiler == \"Visual Studio\":\n self._build_visual_studio()\n else:\n shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB,\n os.path.join(self._source_subfolder, \"config.sub\"))\n shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS,\n os.path.join(self._source_subfolder, \"config.guess\"))\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n if self.settings.compiler == \"Visual Studio\":\n self.copy(pattern=\"*.h\", src=os.path.join(self._source_subfolder, \"include\"), dst=\"include\", keep_path=True)\n if self.options.shared:\n self.copy(pattern=\"*.lib\", src=os.path.join(self._source_subfolder, \"bin\"), dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.dll\", src=os.path.join(self._source_subfolder, \"bin\"), dst=\"bin\", keep_path=False)\n else:\n self.copy(pattern=\"*.lib\", src=os.path.join(self._source_subfolder, \"Lib\", \"MS\"), dst=\"lib\",\n keep_path=False)\n else:\n autotools = self._configure_autotools()\n autotools.install()\n # remove entire share directory\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n # remove pkgconfig\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n # remove la files\n la = os.path.join(self.package_folder, \"lib\", \"liblcms2.la\")\n if os.path.isfile(la):\n os.unlink(la)\n # remove binaries\n for bin_program in [\"tificc\", \"linkicc\", \"transicc\", \"psicc\", \"jpgicc\"]:\n for ext in [\"\", \".exe\"]:\n try:\n os.remove(os.path.join(self.package_folder, \"bin\", bin_program + ext))\n except:\n pass\n\n def package_info(self):\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.libs = [\"lcms2\" if self.options.shared else \"lcms2_static\"]\n if self.options.shared:\n self.cpp_info.defines.append(\"CMS_DLL\")\n else:\n self.cpp_info.libs = [\"lcms2\"]\n self.cpp_info.names[\"pkg_config\"] = \"lcms2\"\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.system_libs.append(\"m\")\n", "path": "recipes/lcms/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild\nfrom conans.tools import Version\nimport os\nimport re\nimport shutil\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LcmsConan(ConanFile):\n name = \"lcms\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"A free, open source, CMM engine.\"\n license = \"MIT\"\n homepage = \"https://github.com/mm2/Little-CMS\"\n topics = (\"lcms\", \"cmm\", \"icc\", \"cmm-engine\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n if self.settings.compiler != \"Visual Studio\":\n 
self.build_requires(\"gnu-config/cci.20201022\")\n if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/cci.latest\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _patch_sources(self):\n if self.settings.compiler == \"Visual Studio\" and Version(self.settings.compiler.version) >= \"14\":\n # since VS2015 vsnprintf is built-in\n path = os.path.join(self._source_subfolder, \"src\", \"lcms2_internal.h\")\n tools.replace_in_file(path, \"# define vsnprintf _vsnprintf\", \"\")\n if self.settings.os == \"Android\" and tools.os_info.is_windows:\n # remove escape for quotation marks, to make ndk on windows happy\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"),\n \"s/[\t `~#$^&*(){}\\\\\\\\|;'\\\\\\''\\\"<>?]/\\\\\\\\&/g\",\n \"s/[\t `~#$^&*(){}\\\\\\\\|;<>?]/\\\\\\\\&/g\")\n\n def _build_visual_studio(self):\n if tools.Version(self.version) <= \"2.11\":\n vc_sln_subdir = \"VC2013\"\n else:\n vc_sln_subdir = \"VC2015\"\n with tools.chdir(os.path.join(self._source_subfolder, \"Projects\", vc_sln_subdir )):\n target = \"lcms2_DLL\" if self.options.shared else \"lcms2_static\"\n upgrade_project = Version(self.settings.compiler.version) > \"12\"\n properties = {\n # Enable LTO when CFLAGS contains -GL\n \"WholeProgramOptimization\": \"true\" if any(re.finditer(\"(^| )[/-]GL($| )\", tools.get_env(\"CFLAGS\", \"\"))) else \"false\",\n }\n # run build\n msbuild = MSBuild(self)\n msbuild.build(\"lcms2.sln\", targets=[target], platforms={\"x86\": \"Win32\"}, upgrade_project=upgrade_project, properties=properties)\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n yes_no = lambda v: \"yes\" if v else \"no\"\n args = [\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n \"--without-tiff\",\n \"--without-jpeg\",\n ]\n self._autotools.configure(args=args, configure_dir=self._source_subfolder)\n return self._autotools\n\n @property\n def _user_info_build(self):\n # If using the experimental feature with different context for host and\n # build, the 'user_info' attributes of the 'build_requires' packages\n # will be located into the 'user_info_build' object. 
In other cases they\n # will be located into the 'deps_user_info' object.\n return getattr(self, \"user_info_build\", None) or self.deps_user_info\n\n def build(self):\n self._patch_sources()\n if self.settings.compiler == \"Visual Studio\":\n self._build_visual_studio()\n else:\n shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB,\n os.path.join(self._source_subfolder, \"config.sub\"))\n shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS,\n os.path.join(self._source_subfolder, \"config.guess\"))\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n if self.settings.compiler == \"Visual Studio\":\n self.copy(pattern=\"*.h\", src=os.path.join(self._source_subfolder, \"include\"), dst=\"include\", keep_path=True)\n if self.options.shared:\n self.copy(pattern=\"*.lib\", src=os.path.join(self._source_subfolder, \"bin\"), dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.dll\", src=os.path.join(self._source_subfolder, \"bin\"), dst=\"bin\", keep_path=False)\n else:\n self.copy(pattern=\"*.lib\", src=os.path.join(self._source_subfolder, \"Lib\", \"MS\"), dst=\"lib\",\n keep_path=False)\n else:\n autotools = self._configure_autotools()\n autotools.install()\n # remove entire share directory\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n # remove pkgconfig\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n # remove la files\n la = os.path.join(self.package_folder, \"lib\", \"liblcms2.la\")\n if os.path.isfile(la):\n os.unlink(la)\n # remove binaries\n for bin_program in [\"tificc\", \"linkicc\", \"transicc\", \"psicc\", \"jpgicc\"]:\n for ext in [\"\", \".exe\"]:\n try:\n os.remove(os.path.join(self.package_folder, \"bin\", bin_program + ext))\n except:\n pass\n\n def package_info(self):\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.libs = [\"lcms2\" if self.options.shared else \"lcms2_static\"]\n if self.options.shared:\n self.cpp_info.defines.append(\"CMS_DLL\")\n else:\n self.cpp_info.libs = [\"lcms2\"]\n self.cpp_info.names[\"pkg_config\"] = \"lcms2\"\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.system_libs.append(\"m\")\n", "path": "recipes/lcms/all/conanfile.py"}]} | 2,530 | 339 |
gh_patches_debug_4034 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-1334 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Messenger - qa_data_collection task
Running the messenger qa_data_collection task gives me the following error:
Exception in user code:
Traceback (most recent call last):
File "/home/micol/PycharmProjects/parlai_project/ParlAI/parlai/messenger/core/messenger_manager.py", line 552, in _task_function
task_functions[world_type](self, opt, agents, conversation_id)
File "run.py", line 45, in run_conversation
task = task_class(task_opt)
File "/home/micol/PycharmProjects/parlai_project/ParlAI/parlai/tasks/squad/agents.py", line 114, in __init__
build(opt)
File "/home/micol/PycharmProjects/parlai_project/ParlAI/parlai/tasks/squad/build.py", line 35, in build
if 'fulldoc' in opt['task']:
KeyError: 'task'
"Facebook response from message send: {'recipient_id': '2172740222785536', 'message_id': 'm_rDniDdXXKp9KY2DDR7nYxf46FEm_jG446gMYoyCBkXQ1qs8d7VUug8eG31SNJbRdWO5T8BAyL5R4_1-p3tHWVQ'}"
Message data received: {'object': 'page', 'entry': [{'id': '336760520259323', 'time': 1546939725227, 'messaging': [{'sender': {'id': '336760520259323'}, 'recipient': {'id': '2172740222785536'}, 'timestamp': 1546939724784, 'message': {'is_echo': True, 'app_id': 322301978494112, 'mid': 'rDniDdXXKp9KY2DDR7nYxf46FEm_jG446gMYoyCBkXQ1qs8d7VUug8eG31SNJbRdWO5T8BAyL5R4_1-p3tHWVQ', 'seq': 32111, 'text': 'Sorry, this world closed. Returning to overworld.'}}]}]}
Message data received: {'object': 'page', 'entry': [{'id': '336760520259323', 'time': 1546939725478, 'messaging': [{'sender': {'id': '2172740222785536'}, 'recipient': {'id': '336760520259323'}, 'timestamp': 1546939725466, 'delivery': {'mids': ['rDniDdXXKp9KY2DDR7nYxf46FEm_jG446gMYoyCBkXQ1qs8d7VUug8eG31SNJbRdWO5T8BAyL5R4_1-p3tHWVQ'], 'watermark': 1546939724784, 'seq': 0}}]}]}
What is the possible solution?
Thank you
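One possible fix, sketched below: derive the teacher options from the full parsed `opt` instead of an empty dict, so keys the SQuAD teacher relies on (such as `task` and `datapath`) are not dropped. `make_task_opt` is only an illustrative helper name, not part of ParlAI:

```python
def make_task_opt(opt):
    # Start from a copy of the full parsed options so 'task', 'datapath',
    # etc. reach the SQuAD teacher's build step, then set teacher-specific values.
    task_opt = opt.copy()
    task_opt['datatype'] = 'train'
    task_opt['datapath'] = opt['datapath']
    return task_opt
```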
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/messenger/tasks/qa_data_collection/run.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) 2017-present, Facebook, Inc.
4 # All rights reserved.
5 # This source code is licensed under the BSD-style license found in the
6 # LICENSE file in the root directory of this source tree. An additional grant
7 # of patent rights can be found in the PATENTS file in the same directory.
8 from parlai.core.params import ParlaiParser
9 from parlai.messenger.tasks.qa_data_collection.worlds import \
10 QADataCollectionWorld
11 from parlai.messenger.core.messenger_manager import MessengerManager
12 from parlai.messenger.core.worlds import SimpleMessengerOverworld as \
13 MessengerOverworld
14 import os
15 import importlib
16
17
18 def main():
19 argparser = ParlaiParser(False, False)
20 argparser.add_parlai_data_path()
21 argparser.add_messenger_args()
22 opt = argparser.parse_args()
23 opt['task'] = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
24
25 # Initialize a SQuAD teacher agent, which we will get context from
26 module_name = 'parlai.tasks.squad.agents'
27 class_name = 'DefaultTeacher'
28 my_module = importlib.import_module(module_name)
29 task_class = getattr(my_module, class_name)
30 task_opt = {}
31 task_opt['datatype'] = 'train'
32 task_opt['datapath'] = opt['datapath']
33
34 messenger_manager = MessengerManager(opt=opt)
35 messenger_manager.setup_server()
36 messenger_manager.init_new_state()
37
38 def get_overworld(opt, agent):
39 return MessengerOverworld(opt, agent)
40
41 def assign_agent_role(agent):
42 agent[0].disp_id = 'Agent'
43
44 def run_conversation(manager, opt, agents, task_id):
45 task = task_class(task_opt)
46 agent = agents[0]
47 world = QADataCollectionWorld(
48 opt=opt,
49 task=task,
50 agent=agent
51 )
52 while not world.episode_done():
53 world.parley()
54 world.shutdown()
55
56 # World with no onboarding
57 messenger_manager.set_onboard_functions({'default': None})
58 task_functions = {'default': run_conversation}
59 assign_agent_roles = {'default': assign_agent_role}
60 messenger_manager.set_agents_required({'default': 1})
61
62 messenger_manager.set_overworld_func(get_overworld)
63 messenger_manager.setup_socket()
64 try:
65 messenger_manager.start_new_run()
66 messenger_manager.start_task(
67 assign_role_functions=assign_agent_roles,
68 task_functions=task_functions,
69 )
70 except BaseException:
71 raise
72 finally:
73 messenger_manager.shutdown()
74
75
76 if __name__ == '__main__':
77 main()
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parlai/messenger/tasks/qa_data_collection/run.py b/parlai/messenger/tasks/qa_data_collection/run.py
--- a/parlai/messenger/tasks/qa_data_collection/run.py
+++ b/parlai/messenger/tasks/qa_data_collection/run.py
@@ -27,7 +27,7 @@
class_name = 'DefaultTeacher'
my_module = importlib.import_module(module_name)
task_class = getattr(my_module, class_name)
- task_opt = {}
+ task_opt = opt.copy()
task_opt['datatype'] = 'train'
task_opt['datapath'] = opt['datapath']
| {"golden_diff": "diff --git a/parlai/messenger/tasks/qa_data_collection/run.py b/parlai/messenger/tasks/qa_data_collection/run.py\n--- a/parlai/messenger/tasks/qa_data_collection/run.py\n+++ b/parlai/messenger/tasks/qa_data_collection/run.py\n@@ -27,7 +27,7 @@\n class_name = 'DefaultTeacher'\n my_module = importlib.import_module(module_name)\n task_class = getattr(my_module, class_name)\n- task_opt = {}\n+ task_opt = opt.copy()\n task_opt['datatype'] = 'train'\n task_opt['datapath'] = opt['datapath']\n", "issue": "Messenger - qa_data_collection task \nRunning the messenger qa_data_collection task, it gives me te following error:\r\nException in user code:\r\nTraceback (most recent call last):\r\n File \"/home/micol/PycharmProjects/parlai_project/ParlAI/parlai/messenger/core/messenger_manager.py\", line 552, in _task_function\r\n task_functions[world_type](self, opt, agents, conversation_id)\r\n File \"run.py\", line 45, in run_conversation\r\n task = task_class(task_opt)\r\n File \"/home/micol/PycharmProjects/parlai_project/ParlAI/parlai/tasks/squad/agents.py\", line 114, in __init__\r\n build(opt)\r\n File \"/home/micol/PycharmProjects/parlai_project/ParlAI/parlai/tasks/squad/build.py\", line 35, in build\r\n if 'fulldoc' in opt['task']:\r\nKeyError: 'task'\r\n\"Facebook response from message send: {'recipient_id': '2172740222785536', 'message_id': 'm_rDniDdXXKp9KY2DDR7nYxf46FEm_jG446gMYoyCBkXQ1qs8d7VUug8eG31SNJbRdWO5T8BAyL5R4_1-p3tHWVQ'}\"\r\nMessage data received: {'object': 'page', 'entry': [{'id': '336760520259323', 'time': 1546939725227, 'messaging': [{'sender': {'id': '336760520259323'}, 'recipient': {'id': '2172740222785536'}, 'timestamp': 1546939724784, 'message': {'is_echo': True, 'app_id': 322301978494112, 'mid': 'rDniDdXXKp9KY2DDR7nYxf46FEm_jG446gMYoyCBkXQ1qs8d7VUug8eG31SNJbRdWO5T8BAyL5R4_1-p3tHWVQ', 'seq': 32111, 'text': 'Sorry, this world closed. Returning to overworld.'}}]}]}\r\nMessage data received: {'object': 'page', 'entry': [{'id': '336760520259323', 'time': 1546939725478, 'messaging': [{'sender': {'id': '2172740222785536'}, 'recipient': {'id': '336760520259323'}, 'timestamp': 1546939725466, 'delivery': {'mids': ['rDniDdXXKp9KY2DDR7nYxf46FEm_jG446gMYoyCBkXQ1qs8d7VUug8eG31SNJbRdWO5T8BAyL5R4_1-p3tHWVQ'], 'watermark': 1546939724784, 'seq': 0}}]}]}\r\n \r\nWhat is the possible solution?\r\n\r\nThank you\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\nfrom parlai.core.params import ParlaiParser\nfrom parlai.messenger.tasks.qa_data_collection.worlds import \\\n QADataCollectionWorld\nfrom parlai.messenger.core.messenger_manager import MessengerManager\nfrom parlai.messenger.core.worlds import SimpleMessengerOverworld as \\\n MessengerOverworld\nimport os\nimport importlib\n\n\ndef main():\n argparser = ParlaiParser(False, False)\n argparser.add_parlai_data_path()\n argparser.add_messenger_args()\n opt = argparser.parse_args()\n opt['task'] = os.path.basename(os.path.dirname(os.path.abspath(__file__)))\n\n # Initialize a SQuAD teacher agent, which we will get context from\n module_name = 'parlai.tasks.squad.agents'\n class_name = 'DefaultTeacher'\n my_module = importlib.import_module(module_name)\n task_class = getattr(my_module, class_name)\n task_opt = {}\n task_opt['datatype'] = 'train'\n task_opt['datapath'] = opt['datapath']\n\n messenger_manager = MessengerManager(opt=opt)\n messenger_manager.setup_server()\n messenger_manager.init_new_state()\n\n def get_overworld(opt, agent):\n return MessengerOverworld(opt, agent)\n\n def assign_agent_role(agent):\n agent[0].disp_id = 'Agent'\n\n def run_conversation(manager, opt, agents, task_id):\n task = task_class(task_opt)\n agent = agents[0]\n world = QADataCollectionWorld(\n opt=opt,\n task=task,\n agent=agent\n )\n while not world.episode_done():\n world.parley()\n world.shutdown()\n\n # World with no onboarding\n messenger_manager.set_onboard_functions({'default': None})\n task_functions = {'default': run_conversation}\n assign_agent_roles = {'default': assign_agent_role}\n messenger_manager.set_agents_required({'default': 1})\n\n messenger_manager.set_overworld_func(get_overworld)\n messenger_manager.setup_socket()\n try:\n messenger_manager.start_new_run()\n messenger_manager.start_task(\n assign_role_functions=assign_agent_roles,\n task_functions=task_functions,\n )\n except BaseException:\n raise\n finally:\n messenger_manager.shutdown()\n\n\nif __name__ == '__main__':\n main()\n", "path": "parlai/messenger/tasks/qa_data_collection/run.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\nfrom parlai.core.params import ParlaiParser\nfrom parlai.messenger.tasks.qa_data_collection.worlds import \\\n QADataCollectionWorld\nfrom parlai.messenger.core.messenger_manager import MessengerManager\nfrom parlai.messenger.core.worlds import SimpleMessengerOverworld as \\\n MessengerOverworld\nimport os\nimport importlib\n\n\ndef main():\n argparser = ParlaiParser(False, False)\n argparser.add_parlai_data_path()\n argparser.add_messenger_args()\n opt = argparser.parse_args()\n opt['task'] = os.path.basename(os.path.dirname(os.path.abspath(__file__)))\n\n # Initialize a SQuAD teacher agent, which we will get context from\n module_name = 'parlai.tasks.squad.agents'\n class_name = 'DefaultTeacher'\n my_module = importlib.import_module(module_name)\n task_class = getattr(my_module, class_name)\n task_opt = opt.copy()\n task_opt['datatype'] = 'train'\n task_opt['datapath'] = opt['datapath']\n\n messenger_manager = MessengerManager(opt=opt)\n messenger_manager.setup_server()\n messenger_manager.init_new_state()\n\n def get_overworld(opt, agent):\n return MessengerOverworld(opt, agent)\n\n def assign_agent_role(agent):\n agent[0].disp_id = 'Agent'\n\n def run_conversation(manager, opt, agents, task_id):\n task = task_class(task_opt)\n agent = agents[0]\n world = QADataCollectionWorld(\n opt=opt,\n task=task,\n agent=agent\n )\n while not world.episode_done():\n world.parley()\n world.shutdown()\n\n # World with no onboarding\n messenger_manager.set_onboard_functions({'default': None})\n task_functions = {'default': run_conversation}\n assign_agent_roles = {'default': assign_agent_role}\n messenger_manager.set_agents_required({'default': 1})\n\n messenger_manager.set_overworld_func(get_overworld)\n messenger_manager.setup_socket()\n try:\n messenger_manager.start_new_run()\n messenger_manager.start_task(\n assign_role_functions=assign_agent_roles,\n task_functions=task_functions,\n )\n except BaseException:\n raise\n finally:\n messenger_manager.shutdown()\n\n\nif __name__ == '__main__':\n main()\n", "path": "parlai/messenger/tasks/qa_data_collection/run.py"}]} | 1,793 | 143 |
gh_patches_debug_13636 | rasdani/github-patches | git_diff | apache__airflow-37066 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Confusing log for long running tasks: "dependency 'Task Instance Not Running' FAILED: Task is in the running state"
**Apache Airflow version**: 1.10.* / 2.0.* / 2.1.*
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): Any
**Environment**:
- **Cloud provider or hardware configuration**: Any
- **OS** (e.g. from /etc/os-release): Any
- **Kernel** (e.g. `uname -a`): Any
- **Install tools**: Any
- **Others**: N/A
**What happened**:
This line in the TaskInstance log is very misleading. It seems to happen for tasks that take longer than one hour. When users are waiting for tasks to finish and see this in the log, they often get confused. They may think something is wrong with their task or with Airflow. In fact, this line is harmless. It's simply saying "the TaskInstance is already running so it cannot be run again".
```
{taskinstance.py:874} INFO - Dependencies not met for <TaskInstance: ... [running]>, dependency 'Task Instance Not Running' FAILED: Task is in the running state
{taskinstance.py:874} INFO - Dependencies not met for <TaskInstance: ... [running]>, dependency 'Task Instance State' FAILED: Task is in the 'running' state which is not a valid state for execution. The task must be cleared in order to be run.
```
**What you expected to happen**:
The confusion is unnecessary. This line should be silenced in the log. Or it should log something clearer.
**How to reproduce it**:
Any task that takes more than an hour to run has this line in the log.
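For context, the patch paired with this issue (shown further below) replaces the hard-coded Celery `task_acks_late: True` with a configurable lookup. A minimal sketch of how that lookup behaves — assuming a standard Airflow installation where `airflow.configuration.conf` is importable, with an optional `[celery] task_acks_late` entry in `airflow.cfg`:

```python
from airflow.configuration import conf

# With the patch applied, a deployment can opt out of late acknowledgement by
# setting `task_acks_late = False` under [celery] in airflow.cfg; when the
# option is absent, the fallback preserves the previous default of True.
task_acks_late = conf.getboolean("celery", "task_acks_late", fallback=True)
print("task_acks_late:", task_acks_late)
```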
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/providers/celery/executors/default_celery.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """Default celery configuration."""
19 from __future__ import annotations
20
21 import json
22 import logging
23 import ssl
24
25 import re2
26
27 from airflow.configuration import conf
28 from airflow.exceptions import AirflowConfigException, AirflowException
29
30
31 def _broker_supports_visibility_timeout(url):
32 return url.startswith(("redis://", "rediss://", "sqs://", "sentinel://"))
33
34
35 log = logging.getLogger(__name__)
36
37 # IMPORTANT NOTE! Celery Executor has initialization done dynamically and it performs initialization when
38 # it is imported, so we need fallbacks here in order to be able to import the class directly without
39 # having configuration initialized before. Do not remove those fallbacks!
40 #
41 # This is not strictly needed for production:
42 #
43 # * for Airflow 2.6 and before the defaults will come from the core defaults
44 # * for Airflow 2.7+ the defaults will be loaded via ProvidersManager
45 #
46 # But it helps in our tests to import the executor class and validate if the celery code can be imported
47 # in the current and older versions of Airflow.
48
49 broker_url = conf.get("celery", "BROKER_URL", fallback="redis://redis:6379/0")
50
51 broker_transport_options: dict = conf.getsection("celery_broker_transport_options") or {}
52 if "visibility_timeout" not in broker_transport_options:
53 if _broker_supports_visibility_timeout(broker_url):
54 broker_transport_options["visibility_timeout"] = 21600
55
56 if "sentinel_kwargs" in broker_transport_options:
57 try:
58 sentinel_kwargs = json.loads(broker_transport_options["sentinel_kwargs"])
59 if not isinstance(sentinel_kwargs, dict):
60 raise ValueError
61 broker_transport_options["sentinel_kwargs"] = sentinel_kwargs
62 except Exception:
63 raise AirflowException("sentinel_kwargs should be written in the correct dictionary format.")
64
65 if conf.has_option("celery", "RESULT_BACKEND"):
66 result_backend = conf.get_mandatory_value("celery", "RESULT_BACKEND")
67 else:
68 log.debug("Value for celery result_backend not found. Using sql_alchemy_conn with db+ prefix.")
69 result_backend = f'db+{conf.get("database", "SQL_ALCHEMY_CONN")}'
70
71 DEFAULT_CELERY_CONFIG = {
72 "accept_content": ["json"],
73 "event_serializer": "json",
74 "worker_prefetch_multiplier": conf.getint("celery", "worker_prefetch_multiplier", fallback=1),
75 "task_acks_late": True,
76 "task_default_queue": conf.get("operators", "DEFAULT_QUEUE"),
77 "task_default_exchange": conf.get("operators", "DEFAULT_QUEUE"),
78 "task_track_started": conf.getboolean("celery", "task_track_started", fallback=True),
79 "broker_url": broker_url,
80 "broker_transport_options": broker_transport_options,
81 "result_backend": result_backend,
82 "database_engine_options": conf.getjson(
83 "celery", "result_backend_sqlalchemy_engine_options", fallback={}
84 ),
85 "worker_concurrency": conf.getint("celery", "WORKER_CONCURRENCY", fallback=16),
86 "worker_enable_remote_control": conf.getboolean("celery", "worker_enable_remote_control", fallback=True),
87 }
88
89
90 def _get_celery_ssl_active() -> bool:
91 try:
92 return conf.getboolean("celery", "SSL_ACTIVE")
93 except AirflowConfigException:
94 log.warning("Celery Executor will run without SSL")
95 return False
96
97
98 celery_ssl_active = _get_celery_ssl_active()
99
100 try:
101 if celery_ssl_active:
102 if broker_url and "amqp://" in broker_url:
103 broker_use_ssl = {
104 "keyfile": conf.get("celery", "SSL_KEY"),
105 "certfile": conf.get("celery", "SSL_CERT"),
106 "ca_certs": conf.get("celery", "SSL_CACERT"),
107 "cert_reqs": ssl.CERT_REQUIRED,
108 }
109 elif broker_url and re2.search("rediss?://|sentinel://", broker_url):
110 broker_use_ssl = {
111 "ssl_keyfile": conf.get("celery", "SSL_KEY"),
112 "ssl_certfile": conf.get("celery", "SSL_CERT"),
113 "ssl_ca_certs": conf.get("celery", "SSL_CACERT"),
114 "ssl_cert_reqs": ssl.CERT_REQUIRED,
115 }
116 else:
117 raise AirflowException(
118 "The broker you configured does not support SSL_ACTIVE to be True. "
119 "Please use RabbitMQ or Redis if you would like to use SSL for broker."
120 )
121
122 DEFAULT_CELERY_CONFIG["broker_use_ssl"] = broker_use_ssl
123 except AirflowConfigException:
124 raise AirflowException(
125 "AirflowConfigException: SSL_ACTIVE is True, "
126 "please ensure SSL_KEY, "
127 "SSL_CERT and SSL_CACERT are set"
128 )
129 except Exception as e:
130 raise AirflowException(
131 f"Exception: There was an unknown Celery SSL Error. Please ensure you want to use SSL and/or have "
132 f"all necessary certs and key ({e})."
133 )
134
135 match_not_recommended_backend = re2.search("rediss?://|amqp://|rpc://", result_backend)
136 if match_not_recommended_backend:
137 log.warning(
138 "You have configured a result_backend using the protocol `%s`,"
139 " it is highly recommended to use an alternative result_backend (i.e. a database).",
140 match_not_recommended_backend.group(0).strip("://"),
141 )
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/providers/celery/executors/default_celery.py b/airflow/providers/celery/executors/default_celery.py
--- a/airflow/providers/celery/executors/default_celery.py
+++ b/airflow/providers/celery/executors/default_celery.py
@@ -72,7 +72,7 @@
"accept_content": ["json"],
"event_serializer": "json",
"worker_prefetch_multiplier": conf.getint("celery", "worker_prefetch_multiplier", fallback=1),
- "task_acks_late": True,
+ "task_acks_late": conf.getboolean("celery", "task_acks_late", fallback=True),
"task_default_queue": conf.get("operators", "DEFAULT_QUEUE"),
"task_default_exchange": conf.get("operators", "DEFAULT_QUEUE"),
"task_track_started": conf.getboolean("celery", "task_track_started", fallback=True),
| {"golden_diff": "diff --git a/airflow/providers/celery/executors/default_celery.py b/airflow/providers/celery/executors/default_celery.py\n--- a/airflow/providers/celery/executors/default_celery.py\n+++ b/airflow/providers/celery/executors/default_celery.py\n@@ -72,7 +72,7 @@\n \"accept_content\": [\"json\"],\n \"event_serializer\": \"json\",\n \"worker_prefetch_multiplier\": conf.getint(\"celery\", \"worker_prefetch_multiplier\", fallback=1),\n- \"task_acks_late\": True,\n+ \"task_acks_late\": conf.getboolean(\"celery\", \"task_acks_late\", fallback=True),\n \"task_default_queue\": conf.get(\"operators\", \"DEFAULT_QUEUE\"),\n \"task_default_exchange\": conf.get(\"operators\", \"DEFAULT_QUEUE\"),\n \"task_track_started\": conf.getboolean(\"celery\", \"task_track_started\", fallback=True),\n", "issue": "Confusing log for long running tasks: \"dependency 'Task Instance Not Running' FAILED: Task is in the running state\"\n**Apache Airflow version**: 1.10.* / 2.0.* / 2.1.*\r\n\r\n\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): Any\r\n\r\n**Environment**:\r\n\r\n- **Cloud provider or hardware configuration**: Any\r\n- **OS** (e.g. from /etc/os-release): Any\r\n- **Kernel** (e.g. `uname -a`): Any\r\n- **Install tools**: Any\r\n- **Others**: N/A\r\n\r\n**What happened**:\r\n\r\nThis line in the TaskInstance log is very misleading. It seems to happen for tasks that take longer than one hour. When users are waiting for tasks to finish and see this in the log, they often get confused. They may think something is wrong with their task or with Airflow. In fact, this line is harmless. It's simply saying \"the TaskInstance is already running so it cannot be run again\". \r\n\r\n```\r\n{taskinstance.py:874} INFO - Dependencies not met for <TaskInstance: ... [running]>, dependency 'Task Instance Not Running' FAILED: Task is in the running state\r\n{taskinstance.py:874} INFO - Dependencies not met for <TaskInstance: ... [running]>, dependency 'Task Instance State' FAILED: Task is in the 'running' state which is not a valid state for execution. The task must be cleared in order to be run.\r\n```\r\n\r\n**What you expected to happen**:\r\n\r\nThe confusion is unnecessary. This line should be silenced in the log. Or it should log something clearer.\r\n\r\n**How to reproduce it**:\r\n\r\nAny task that takes more than an hour to run has this line in the log.\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default celery configuration.\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport logging\nimport ssl\n\nimport re2\n\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException, AirflowException\n\n\ndef _broker_supports_visibility_timeout(url):\n return url.startswith((\"redis://\", \"rediss://\", \"sqs://\", \"sentinel://\"))\n\n\nlog = logging.getLogger(__name__)\n\n# IMPORTANT NOTE! Celery Executor has initialization done dynamically and it performs initialization when\n# it is imported, so we need fallbacks here in order to be able to import the class directly without\n# having configuration initialized before. Do not remove those fallbacks!\n#\n# This is not strictly needed for production:\n#\n# * for Airflow 2.6 and before the defaults will come from the core defaults\n# * for Airflow 2.7+ the defaults will be loaded via ProvidersManager\n#\n# But it helps in our tests to import the executor class and validate if the celery code can be imported\n# in the current and older versions of Airflow.\n\nbroker_url = conf.get(\"celery\", \"BROKER_URL\", fallback=\"redis://redis:6379/0\")\n\nbroker_transport_options: dict = conf.getsection(\"celery_broker_transport_options\") or {}\nif \"visibility_timeout\" not in broker_transport_options:\n if _broker_supports_visibility_timeout(broker_url):\n broker_transport_options[\"visibility_timeout\"] = 21600\n\nif \"sentinel_kwargs\" in broker_transport_options:\n try:\n sentinel_kwargs = json.loads(broker_transport_options[\"sentinel_kwargs\"])\n if not isinstance(sentinel_kwargs, dict):\n raise ValueError\n broker_transport_options[\"sentinel_kwargs\"] = sentinel_kwargs\n except Exception:\n raise AirflowException(\"sentinel_kwargs should be written in the correct dictionary format.\")\n\nif conf.has_option(\"celery\", \"RESULT_BACKEND\"):\n result_backend = conf.get_mandatory_value(\"celery\", \"RESULT_BACKEND\")\nelse:\n log.debug(\"Value for celery result_backend not found. 
Using sql_alchemy_conn with db+ prefix.\")\n result_backend = f'db+{conf.get(\"database\", \"SQL_ALCHEMY_CONN\")}'\n\nDEFAULT_CELERY_CONFIG = {\n \"accept_content\": [\"json\"],\n \"event_serializer\": \"json\",\n \"worker_prefetch_multiplier\": conf.getint(\"celery\", \"worker_prefetch_multiplier\", fallback=1),\n \"task_acks_late\": True,\n \"task_default_queue\": conf.get(\"operators\", \"DEFAULT_QUEUE\"),\n \"task_default_exchange\": conf.get(\"operators\", \"DEFAULT_QUEUE\"),\n \"task_track_started\": conf.getboolean(\"celery\", \"task_track_started\", fallback=True),\n \"broker_url\": broker_url,\n \"broker_transport_options\": broker_transport_options,\n \"result_backend\": result_backend,\n \"database_engine_options\": conf.getjson(\n \"celery\", \"result_backend_sqlalchemy_engine_options\", fallback={}\n ),\n \"worker_concurrency\": conf.getint(\"celery\", \"WORKER_CONCURRENCY\", fallback=16),\n \"worker_enable_remote_control\": conf.getboolean(\"celery\", \"worker_enable_remote_control\", fallback=True),\n}\n\n\ndef _get_celery_ssl_active() -> bool:\n try:\n return conf.getboolean(\"celery\", \"SSL_ACTIVE\")\n except AirflowConfigException:\n log.warning(\"Celery Executor will run without SSL\")\n return False\n\n\ncelery_ssl_active = _get_celery_ssl_active()\n\ntry:\n if celery_ssl_active:\n if broker_url and \"amqp://\" in broker_url:\n broker_use_ssl = {\n \"keyfile\": conf.get(\"celery\", \"SSL_KEY\"),\n \"certfile\": conf.get(\"celery\", \"SSL_CERT\"),\n \"ca_certs\": conf.get(\"celery\", \"SSL_CACERT\"),\n \"cert_reqs\": ssl.CERT_REQUIRED,\n }\n elif broker_url and re2.search(\"rediss?://|sentinel://\", broker_url):\n broker_use_ssl = {\n \"ssl_keyfile\": conf.get(\"celery\", \"SSL_KEY\"),\n \"ssl_certfile\": conf.get(\"celery\", \"SSL_CERT\"),\n \"ssl_ca_certs\": conf.get(\"celery\", \"SSL_CACERT\"),\n \"ssl_cert_reqs\": ssl.CERT_REQUIRED,\n }\n else:\n raise AirflowException(\n \"The broker you configured does not support SSL_ACTIVE to be True. \"\n \"Please use RabbitMQ or Redis if you would like to use SSL for broker.\"\n )\n\n DEFAULT_CELERY_CONFIG[\"broker_use_ssl\"] = broker_use_ssl\nexcept AirflowConfigException:\n raise AirflowException(\n \"AirflowConfigException: SSL_ACTIVE is True, \"\n \"please ensure SSL_KEY, \"\n \"SSL_CERT and SSL_CACERT are set\"\n )\nexcept Exception as e:\n raise AirflowException(\n f\"Exception: There was an unknown Celery SSL Error. Please ensure you want to use SSL and/or have \"\n f\"all necessary certs and key ({e}).\"\n )\n\nmatch_not_recommended_backend = re2.search(\"rediss?://|amqp://|rpc://\", result_backend)\nif match_not_recommended_backend:\n log.warning(\n \"You have configured a result_backend using the protocol `%s`,\"\n \" it is highly recommended to use an alternative result_backend (i.e. a database).\",\n match_not_recommended_backend.group(0).strip(\"://\"),\n )\n", "path": "airflow/providers/celery/executors/default_celery.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default celery configuration.\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport logging\nimport ssl\n\nimport re2\n\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException, AirflowException\n\n\ndef _broker_supports_visibility_timeout(url):\n return url.startswith((\"redis://\", \"rediss://\", \"sqs://\", \"sentinel://\"))\n\n\nlog = logging.getLogger(__name__)\n\n# IMPORTANT NOTE! Celery Executor has initialization done dynamically and it performs initialization when\n# it is imported, so we need fallbacks here in order to be able to import the class directly without\n# having configuration initialized before. Do not remove those fallbacks!\n#\n# This is not strictly needed for production:\n#\n# * for Airflow 2.6 and before the defaults will come from the core defaults\n# * for Airflow 2.7+ the defaults will be loaded via ProvidersManager\n#\n# But it helps in our tests to import the executor class and validate if the celery code can be imported\n# in the current and older versions of Airflow.\n\nbroker_url = conf.get(\"celery\", \"BROKER_URL\", fallback=\"redis://redis:6379/0\")\n\nbroker_transport_options: dict = conf.getsection(\"celery_broker_transport_options\") or {}\nif \"visibility_timeout\" not in broker_transport_options:\n if _broker_supports_visibility_timeout(broker_url):\n broker_transport_options[\"visibility_timeout\"] = 21600\n\nif \"sentinel_kwargs\" in broker_transport_options:\n try:\n sentinel_kwargs = json.loads(broker_transport_options[\"sentinel_kwargs\"])\n if not isinstance(sentinel_kwargs, dict):\n raise ValueError\n broker_transport_options[\"sentinel_kwargs\"] = sentinel_kwargs\n except Exception:\n raise AirflowException(\"sentinel_kwargs should be written in the correct dictionary format.\")\n\nif conf.has_option(\"celery\", \"RESULT_BACKEND\"):\n result_backend = conf.get_mandatory_value(\"celery\", \"RESULT_BACKEND\")\nelse:\n log.debug(\"Value for celery result_backend not found. 
Using sql_alchemy_conn with db+ prefix.\")\n result_backend = f'db+{conf.get(\"database\", \"SQL_ALCHEMY_CONN\")}'\n\nDEFAULT_CELERY_CONFIG = {\n \"accept_content\": [\"json\"],\n \"event_serializer\": \"json\",\n \"worker_prefetch_multiplier\": conf.getint(\"celery\", \"worker_prefetch_multiplier\", fallback=1),\n \"task_acks_late\": conf.getboolean(\"celery\", \"task_acks_late\", fallback=True),\n \"task_default_queue\": conf.get(\"operators\", \"DEFAULT_QUEUE\"),\n \"task_default_exchange\": conf.get(\"operators\", \"DEFAULT_QUEUE\"),\n \"task_track_started\": conf.getboolean(\"celery\", \"task_track_started\", fallback=True),\n \"broker_url\": broker_url,\n \"broker_transport_options\": broker_transport_options,\n \"result_backend\": result_backend,\n \"database_engine_options\": conf.getjson(\n \"celery\", \"result_backend_sqlalchemy_engine_options\", fallback={}\n ),\n \"worker_concurrency\": conf.getint(\"celery\", \"WORKER_CONCURRENCY\", fallback=16),\n \"worker_enable_remote_control\": conf.getboolean(\"celery\", \"worker_enable_remote_control\", fallback=True),\n}\n\n\ndef _get_celery_ssl_active() -> bool:\n try:\n return conf.getboolean(\"celery\", \"SSL_ACTIVE\")\n except AirflowConfigException:\n log.warning(\"Celery Executor will run without SSL\")\n return False\n\n\ncelery_ssl_active = _get_celery_ssl_active()\n\ntry:\n if celery_ssl_active:\n if broker_url and \"amqp://\" in broker_url:\n broker_use_ssl = {\n \"keyfile\": conf.get(\"celery\", \"SSL_KEY\"),\n \"certfile\": conf.get(\"celery\", \"SSL_CERT\"),\n \"ca_certs\": conf.get(\"celery\", \"SSL_CACERT\"),\n \"cert_reqs\": ssl.CERT_REQUIRED,\n }\n elif broker_url and re2.search(\"rediss?://|sentinel://\", broker_url):\n broker_use_ssl = {\n \"ssl_keyfile\": conf.get(\"celery\", \"SSL_KEY\"),\n \"ssl_certfile\": conf.get(\"celery\", \"SSL_CERT\"),\n \"ssl_ca_certs\": conf.get(\"celery\", \"SSL_CACERT\"),\n \"ssl_cert_reqs\": ssl.CERT_REQUIRED,\n }\n else:\n raise AirflowException(\n \"The broker you configured does not support SSL_ACTIVE to be True. \"\n \"Please use RabbitMQ or Redis if you would like to use SSL for broker.\"\n )\n\n DEFAULT_CELERY_CONFIG[\"broker_use_ssl\"] = broker_use_ssl\nexcept AirflowConfigException:\n raise AirflowException(\n \"AirflowConfigException: SSL_ACTIVE is True, \"\n \"please ensure SSL_KEY, \"\n \"SSL_CERT and SSL_CACERT are set\"\n )\nexcept Exception as e:\n raise AirflowException(\n f\"Exception: There was an unknown Celery SSL Error. Please ensure you want to use SSL and/or have \"\n f\"all necessary certs and key ({e}).\"\n )\n\nmatch_not_recommended_backend = re2.search(\"rediss?://|amqp://|rpc://\", result_backend)\nif match_not_recommended_backend:\n log.warning(\n \"You have configured a result_backend using the protocol `%s`,\"\n \" it is highly recommended to use an alternative result_backend (i.e. a database).\",\n match_not_recommended_backend.group(0).strip(\"://\"),\n )\n", "path": "airflow/providers/celery/executors/default_celery.py"}]} | 2,316 | 208 |
gh_patches_debug_41626 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1457 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OK scraper failing since at least 2017-02-28
State: OK - scraper has been failing since 2017-02-28
Based on automated runs it appears that OK has not run successfully in 4 days (2017-02-28).
```/usr/local/bin/billy-update ok``` | **failed during legislators**
```
Traceback (most recent call last):
File "/opt/openstates/billy/billy/bin/update.py", line 380, in main
run_record += _run_scraper(stype, args, metadata)
File "/opt/openstates/billy/billy/bin/update.py", line 104, in _run_scraper
scraper.scrape(chamber, time)
File "/srv/openstates-web/openstates/ok/legislators.py", line 62, in scrape
getattr(self, 'scrape_' + chamber + '_chamber')(term)
File "/srv/openstates-web/openstates/ok/legislators.py", line 108, in scrape_lower_chamber
party = self._parties[party_text]
KeyError: ''
```
Visit http://bobsled.openstates.org/ for more info.
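The empty party string appears to come from rows for seats with no current representative, whose name cell is just a district label; the accompanying patch skips such rows before the `self._parties[party_text]` lookup. A minimal, self-contained sketch of that guard — the `_parties` mapping is copied from the scraper, while `resolve_member` is a made-up helper for illustration:

```python
import re

# Copied from the scraper; vacant seats carry an empty party string, which is
# what raises the KeyError in the traceback above.
_parties = {'R': 'Republican', 'D': 'Democratic', 'I': 'Independent'}

def resolve_member(name_text, party_text):
    # Skip placeholder rows for seats with no current representative,
    # mirroring the guard added in the accompanying patch.
    if re.search(r'District \d+', name_text):
        return None
    return _parties[party_text]

print(resolve_member('District 12', ''))   # None -> row skipped
print(resolve_member('Smith, Jane', 'R'))  # Republican
```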
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/ok/legislators.py`
Content:
```
1 import re
2 import lxml
3 from billy.scrape.legislators import LegislatorScraper, Legislator
4 from openstates.utils import LXMLMixin, validate_email_address
5
6
7 class OKLegislatorScraper(LegislatorScraper, LXMLMixin):
8 jurisdiction = 'ok'
9 latest_only = True
10
11 _parties = {'R': 'Republican', 'D': 'Democratic', 'I': 'Independent'}
12
13 def _scrub(self, text):
14 """Squish whitespace and kill \xa0."""
15 return re.sub(r'[\s\xa0]+', ' ', text)
16
17 def _clean_office_info(self, office_info):
18 office_info = map(self._scrub, office_info.itertext())
19 # Throw away anything after any email address, phone number, or
20 # address lines.
21 while office_info:
22 last = office_info[-1]
23 if '@' not in last \
24 and ', OK' not in last \
25 and not re.search(r'[\d\-\(\) ]{7,}', last):
26 office_info.pop()
27 else:
28 break
29 return office_info
30
31 def _extract_phone(self, office_info):
32 phone = None
33
34 for line in office_info:
35 phone_match = re.search(r'''(\(\d{3}\) \d{3}-\d{4}|
36 \d{3}.\d{3}.\d{4})''', line)
37 if phone_match is not None:
38 phone = phone_match.group(1).strip()
39
40 return phone
41
42 def _extract_email(self, doc):
43 xpath = '//div[@class="districtheadleft"]' \
44 + '/b[contains(text(), "Email:")]' \
45 + '/../following-sibling::div' \
46 + '/script/text()'
47 script = doc.xpath(xpath)
48 if not script:
49 return ''
50 script = script[0]
51 line = filter(
52 lambda line: '+ "@" +' in line,
53 script.split('\r\n'))[0]
54 parts = re.findall(r'"(.+?)"', line)
55
56 email = ''.join(parts)
57
58 return email if validate_email_address(email) else ''
59
60
61 def scrape(self, chamber, term):
62 getattr(self, 'scrape_' + chamber + '_chamber')(term)
63
64 def scrape_lower_chamber(self, term):
65 url = "http://www.okhouse.gov/Members/Default.aspx"
66
67 page = self.lxmlize(url)
68
69 legislator_nodes = self.get_nodes(
70 page,
71 '//table[@id="ctl00_ContentPlaceHolder1_RadGrid1_ctl00"]/tbody/tr')
72
73 for legislator_node in legislator_nodes:
74 name_node = self.get_node(
75 legislator_node,
76 './/td[1]/a')
77
78 if name_node is not None:
79 name_text = name_node.text.strip()
80
81 last_name, delimiter, first_name = name_text.partition(',')
82
83 if last_name is not None and first_name is not None:
84 first_name = first_name.strip()
85 last_name = last_name.strip()
86 name = ' '.join([first_name, last_name])
87 else:
88 raise ValueError('Unable to parse name: {}'.format(
89 name_text))
90
91 if name.startswith('House District'):
92 continue
93
94 district_node = self.get_node(
95 legislator_node,
96 './/td[3]')
97
98 if district_node is not None:
99 district = district_node.text.strip()
100
101 party_node = self.get_node(
102 legislator_node,
103 './/td[4]')
104
105 if party_node is not None:
106 party_text = party_node.text.strip()
107
108 party = self._parties[party_text]
109
110 legislator_url = 'http://www.okhouse.gov/District.aspx?District=' + district
111
112 legislator_page = self.lxmlize(legislator_url)
113
114 photo_url = self.get_node(
115 legislator_page,
116 '//a[@id="ctl00_ContentPlaceHolder1_imgHiRes"]/@href')
117
118 legislator = Legislator(
119 _scraped_name=name_text,
120 full_name=name,
121 term=term,
122 chamber='lower',
123 district=district,
124 party=party,
125 photo_url=photo_url,
126 url=legislator_url
127 )
128
129 legislator.add_source(url)
130 legislator.add_source(legislator_url)
131
132 # Scrape offices.
133 self.scrape_lower_offices(legislator_page, legislator)
134
135 self.save_legislator(legislator)
136
137 def scrape_lower_offices(self, doc, legislator):
138
139 # Capitol offices:
140 xpath = '//*[contains(text(), "Capitol Address")]'
141 for bold in doc.xpath(xpath):
142
143 # Get the address.
144 address_div = bold.getparent().itersiblings().next()
145
146 # Get the room number.
147 xpath = '//*[contains(@id, "CapitolRoom")]/text()'
148 room = address_div.xpath(xpath)
149 if room:
150 parts = map(self._scrub, list(address_div.itertext()))
151 parts = [x.strip() for x in parts if x.strip()]
152 phone = parts.pop()
153 parts = [parts[0], 'Room ' + room[0], parts[-1]]
154 address = '\n'.join(parts)
155 else:
156 address = None
157 phone = None
158
159 if not phone:
160 phone = None
161
162 # Get the email address, extracted from a series of JS
163 # "document.write" lines.
164 email = self._extract_email(doc)
165
166 office = dict(name='Capitol Office', type='capitol', phone=phone,
167 address=address)
168 if email:
169 legislator['email'] = email
170 office['email'] = email
171
172 legislator.add_office(**office)
173
174 # District offices only have address, no other information
175 district_address = doc.xpath('//span[@id="ctl00_ContentPlaceHolder1_lblDistrictAddress"]/text()')
176 if district_address:
177 (district_city_state, ) = doc.xpath('//span[@id="ctl00_ContentPlaceHolder1_lblDistrictCity"]/text()')
178 district_address = "{}\n{}".format(district_address[0], district_city_state)
179
180 office = dict(name='District Office', type='district', address=district_address)
181 legislator.add_office(**office)
182
183 def scrape_upper_chamber(self, term):
184 url = "http://oksenate.gov/Senators/Default.aspx"
185 html = self.get(url).text
186 doc = lxml.html.fromstring(html)
187 doc.make_links_absolute(url)
188
189 for a in doc.xpath('//table[@summary]')[0].xpath('.//td//a[contains(@href, "biographies")]'):
190 tail = a.xpath('..')[0].tail
191 if tail:
192 district = tail.split()[1]
193 else:
194 district = a.xpath('../../span')[1].text.split()[1]
195
196 if a.text == None:
197 self.warning("District {} appears to be empty".format(district))
198 continue
199 else:
200 match = re.match(r'(.+) \(([A-Z])\)', a.text.strip())
201 name, party = match.group(1), self._parties[match.group(2)]
202
203 url = a.get('href')
204
205 leg = Legislator(term, 'upper', district, name.strip(), party=party, url=url)
206 leg.add_source(url)
207 self.scrape_upper_offices(leg, url)
208 self.save_legislator(leg)
209
210
211
212 def scrape_upper_offices(self, legislator, url):
213 url = url.replace('aspx', 'html')
214 html = self.get(url).text
215 legislator.add_source(url)
216 doc = lxml.html.fromstring(html)
217 doc.make_links_absolute(url)
218
219 xpath = '//h3[contains(., "Office")]'
220 for table in doc.xpath(xpath)[0].itersiblings():
221 if table.tag == 'table':
222 break
223 col1, col2 = table.xpath('tr[2]/td')
224 lxml.etree.strip_tags(col1, 'sup')
225 lxml.etree.strip_tags(col2, 'sup')
226
227 capitol_office_info = self._clean_office_info(col1)
228
229 # Set email on the leg object.
230 if '@' in capitol_office_info[-1]:
231 email = capitol_office_info.pop()
232 legislator['email'] = email
233 else:
234 email = None
235
236 capitol_phone = self._extract_phone(capitol_office_info)
237
238 capitol_address_lines = map(
239 lambda line: line.strip(),
240 filter(
241 lambda string: re.search(r', OK|Lincoln Blvd|Room \d', string),
242 capitol_office_info))
243
244 office = dict(
245 name='Capitol Office',
246 type='capitol',
247 address='\n'.join(capitol_address_lines),
248 fax=None,
249 email=email,
250 phone=capitol_phone)
251
252 legislator.add_office(**office)
253
254 district_office_info = self._clean_office_info(col2)
255
256 # This probably isn't a valid district office at less than two lines.
257 if len(district_office_info) < 2:
258 return
259
260 district_address_lines = []
261 for line in district_office_info:
262 district_address_lines.append(line.strip())
263 if 'OK' in line:
264 break
265
266 if 'OK' in district_address_lines[-1]:
267 district_address = '\n'.join(filter(lambda line: line,
268 district_address_lines))
269 else:
270 district_address = None
271 #self.logger.debug(district_address)
272
273 district_phone = self._extract_phone(district_office_info)
274
275 office = dict(
276 name='District Office',
277 type='district',
278 address=district_address,
279 fax=None,
280 email=None,
281 phone=district_phone)
282
283 legislator.add_office(**office)
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/ok/legislators.py b/openstates/ok/legislators.py
--- a/openstates/ok/legislators.py
+++ b/openstates/ok/legislators.py
@@ -20,9 +20,9 @@
# address lines.
while office_info:
last = office_info[-1]
- if '@' not in last \
- and ', OK' not in last \
- and not re.search(r'[\d\-\(\) ]{7,}', last):
+ if ('@' not in last
+ and ', OK' not in last
+ and not re.search(r'[\d\-\(\) ]{7,}', last)):
office_info.pop()
else:
break
@@ -57,7 +57,6 @@
return email if validate_email_address(email) else ''
-
def scrape(self, chamber, term):
getattr(self, 'scrape_' + chamber + '_chamber')(term)
@@ -78,6 +77,10 @@
if name_node is not None:
name_text = name_node.text.strip()
+ # Handle seats with no current representative
+ if re.search(r'District \d+', name_text):
+ continue
+
last_name, delimiter, first_name = name_text.partition(',')
if last_name is not None and first_name is not None:
@@ -174,7 +177,7 @@
# District offices only have address, no other information
district_address = doc.xpath('//span[@id="ctl00_ContentPlaceHolder1_lblDistrictAddress"]/text()')
if district_address:
- (district_city_state, ) = doc.xpath('//span[@id="ctl00_ContentPlaceHolder1_lblDistrictCity"]/text()')
+ district_city_state, = doc.xpath('//span[@id="ctl00_ContentPlaceHolder1_lblDistrictCity"]/text()')
district_address = "{}\n{}".format(district_address[0], district_city_state)
office = dict(name='District Office', type='district', address=district_address)
@@ -193,7 +196,7 @@
else:
district = a.xpath('../../span')[1].text.split()[1]
- if a.text == None:
+ if a.text is None:
self.warning("District {} appears to be empty".format(district))
continue
else:
@@ -207,8 +210,6 @@
self.scrape_upper_offices(leg, url)
self.save_legislator(leg)
-
-
def scrape_upper_offices(self, legislator, url):
url = url.replace('aspx', 'html')
html = self.get(url).text
@@ -268,7 +269,7 @@
district_address_lines))
else:
district_address = None
- #self.logger.debug(district_address)
+ # self.logger.debug(district_address)
district_phone = self._extract_phone(district_office_info)
| {"golden_diff": "diff --git a/openstates/ok/legislators.py b/openstates/ok/legislators.py\n--- a/openstates/ok/legislators.py\n+++ b/openstates/ok/legislators.py\n@@ -20,9 +20,9 @@\n # address lines.\n while office_info:\n last = office_info[-1]\n- if '@' not in last \\\n- and ', OK' not in last \\\n- and not re.search(r'[\\d\\-\\(\\) ]{7,}', last):\n+ if ('@' not in last\n+ and ', OK' not in last\n+ and not re.search(r'[\\d\\-\\(\\) ]{7,}', last)):\n office_info.pop()\n else:\n break\n@@ -57,7 +57,6 @@\n \n return email if validate_email_address(email) else ''\n \n-\n def scrape(self, chamber, term):\n getattr(self, 'scrape_' + chamber + '_chamber')(term)\n \n@@ -78,6 +77,10 @@\n if name_node is not None:\n name_text = name_node.text.strip()\n \n+ # Handle seats with no current representative\n+ if re.search(r'District \\d+', name_text):\n+ continue\n+\n last_name, delimiter, first_name = name_text.partition(',')\n \n if last_name is not None and first_name is not None:\n@@ -174,7 +177,7 @@\n # District offices only have address, no other information\n district_address = doc.xpath('//span[@id=\"ctl00_ContentPlaceHolder1_lblDistrictAddress\"]/text()')\n if district_address:\n- (district_city_state, ) = doc.xpath('//span[@id=\"ctl00_ContentPlaceHolder1_lblDistrictCity\"]/text()')\n+ district_city_state, = doc.xpath('//span[@id=\"ctl00_ContentPlaceHolder1_lblDistrictCity\"]/text()')\n district_address = \"{}\\n{}\".format(district_address[0], district_city_state)\n \n office = dict(name='District Office', type='district', address=district_address)\n@@ -193,7 +196,7 @@\n else:\n district = a.xpath('../../span')[1].text.split()[1]\n \n- if a.text == None:\n+ if a.text is None:\n self.warning(\"District {} appears to be empty\".format(district))\n continue\n else:\n@@ -207,8 +210,6 @@\n self.scrape_upper_offices(leg, url)\n self.save_legislator(leg)\n \n-\n-\n def scrape_upper_offices(self, legislator, url):\n url = url.replace('aspx', 'html')\n html = self.get(url).text\n@@ -268,7 +269,7 @@\n district_address_lines))\n else:\n district_address = None\n- #self.logger.debug(district_address)\n+ # self.logger.debug(district_address)\n \n district_phone = self._extract_phone(district_office_info)\n", "issue": "OK scraper failing since at least 2017-02-28\nState: OK - scraper has been failing since 2017-02-28\n\nBased on automated runs it appears that OK has not run successfully in 4 days (2017-02-28).\n\n```/usr/local/bin/billy-update ok``` | **failed during legislators**\n\n```\n Traceback (most recent call last):\n File \"/opt/openstates/billy/billy/bin/update.py\", line 380, in main\n run_record += _run_scraper(stype, args, metadata)\n File \"/opt/openstates/billy/billy/bin/update.py\", line 104, in _run_scraper\n scraper.scrape(chamber, time)\n File \"/srv/openstates-web/openstates/ok/legislators.py\", line 62, in scrape\n getattr(self, 'scrape_' + chamber + '_chamber')(term)\n File \"/srv/openstates-web/openstates/ok/legislators.py\", line 108, in scrape_lower_chamber\n party = self._parties[party_text]\nKeyError: ''\n\n```\n\nVisit http://bobsled.openstates.org/ for more info.\n\n", "before_files": [{"content": "import re\nimport lxml\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin, validate_email_address\n\n\nclass OKLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'ok'\n latest_only = True\n\n _parties = {'R': 'Republican', 'D': 'Democratic', 'I': 'Independent'}\n\n def _scrub(self, text):\n \"\"\"Squish 
whitespace and kill \\xa0.\"\"\"\n return re.sub(r'[\\s\\xa0]+', ' ', text)\n\n def _clean_office_info(self, office_info):\n office_info = map(self._scrub, office_info.itertext())\n # Throw away anything after any email address, phone number, or\n # address lines.\n while office_info:\n last = office_info[-1]\n if '@' not in last \\\n and ', OK' not in last \\\n and not re.search(r'[\\d\\-\\(\\) ]{7,}', last):\n office_info.pop()\n else:\n break\n return office_info\n\n def _extract_phone(self, office_info):\n phone = None\n\n for line in office_info:\n phone_match = re.search(r'''(\\(\\d{3}\\) \\d{3}-\\d{4}|\n \\d{3}.\\d{3}.\\d{4})''', line)\n if phone_match is not None:\n phone = phone_match.group(1).strip()\n\n return phone\n\n def _extract_email(self, doc):\n xpath = '//div[@class=\"districtheadleft\"]' \\\n + '/b[contains(text(), \"Email:\")]' \\\n + '/../following-sibling::div' \\\n + '/script/text()'\n script = doc.xpath(xpath)\n if not script:\n return ''\n script = script[0]\n line = filter(\n lambda line: '+ \"@\" +' in line,\n script.split('\\r\\n'))[0]\n parts = re.findall(r'\"(.+?)\"', line)\n\n email = ''.join(parts)\n\n return email if validate_email_address(email) else ''\n\n\n def scrape(self, chamber, term):\n getattr(self, 'scrape_' + chamber + '_chamber')(term)\n\n def scrape_lower_chamber(self, term):\n url = \"http://www.okhouse.gov/Members/Default.aspx\"\n\n page = self.lxmlize(url)\n\n legislator_nodes = self.get_nodes(\n page,\n '//table[@id=\"ctl00_ContentPlaceHolder1_RadGrid1_ctl00\"]/tbody/tr')\n\n for legislator_node in legislator_nodes:\n name_node = self.get_node(\n legislator_node,\n './/td[1]/a')\n\n if name_node is not None:\n name_text = name_node.text.strip()\n\n last_name, delimiter, first_name = name_text.partition(',')\n\n if last_name is not None and first_name is not None:\n first_name = first_name.strip()\n last_name = last_name.strip()\n name = ' '.join([first_name, last_name])\n else:\n raise ValueError('Unable to parse name: {}'.format(\n name_text))\n\n if name.startswith('House District'):\n continue\n\n district_node = self.get_node(\n legislator_node,\n './/td[3]')\n\n if district_node is not None:\n district = district_node.text.strip()\n\n party_node = self.get_node(\n legislator_node,\n './/td[4]')\n\n if party_node is not None:\n party_text = party_node.text.strip()\n\n party = self._parties[party_text]\n\n legislator_url = 'http://www.okhouse.gov/District.aspx?District=' + district\n\n legislator_page = self.lxmlize(legislator_url)\n\n photo_url = self.get_node(\n legislator_page,\n '//a[@id=\"ctl00_ContentPlaceHolder1_imgHiRes\"]/@href')\n\n legislator = Legislator(\n _scraped_name=name_text,\n full_name=name,\n term=term,\n chamber='lower',\n district=district,\n party=party,\n photo_url=photo_url,\n url=legislator_url\n )\n\n legislator.add_source(url)\n legislator.add_source(legislator_url)\n\n # Scrape offices.\n self.scrape_lower_offices(legislator_page, legislator)\n\n self.save_legislator(legislator)\n\n def scrape_lower_offices(self, doc, legislator):\n\n # Capitol offices:\n xpath = '//*[contains(text(), \"Capitol Address\")]'\n for bold in doc.xpath(xpath):\n\n # Get the address.\n address_div = bold.getparent().itersiblings().next()\n\n # Get the room number.\n xpath = '//*[contains(@id, \"CapitolRoom\")]/text()'\n room = address_div.xpath(xpath)\n if room:\n parts = map(self._scrub, list(address_div.itertext()))\n parts = [x.strip() for x in parts if x.strip()]\n phone = parts.pop()\n parts = [parts[0], 'Room ' + room[0], 
parts[-1]]\n address = '\\n'.join(parts)\n else:\n address = None\n phone = None\n\n if not phone:\n phone = None\n\n # Get the email address, extracted from a series of JS\n # \"document.write\" lines.\n email = self._extract_email(doc)\n\n office = dict(name='Capitol Office', type='capitol', phone=phone,\n address=address)\n if email:\n legislator['email'] = email\n office['email'] = email\n\n legislator.add_office(**office)\n\n # District offices only have address, no other information\n district_address = doc.xpath('//span[@id=\"ctl00_ContentPlaceHolder1_lblDistrictAddress\"]/text()')\n if district_address:\n (district_city_state, ) = doc.xpath('//span[@id=\"ctl00_ContentPlaceHolder1_lblDistrictCity\"]/text()')\n district_address = \"{}\\n{}\".format(district_address[0], district_city_state)\n\n office = dict(name='District Office', type='district', address=district_address)\n legislator.add_office(**office)\n\n def scrape_upper_chamber(self, term):\n url = \"http://oksenate.gov/Senators/Default.aspx\"\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n for a in doc.xpath('//table[@summary]')[0].xpath('.//td//a[contains(@href, \"biographies\")]'):\n tail = a.xpath('..')[0].tail\n if tail:\n district = tail.split()[1]\n else:\n district = a.xpath('../../span')[1].text.split()[1]\n\n if a.text == None:\n self.warning(\"District {} appears to be empty\".format(district))\n continue\n else:\n match = re.match(r'(.+) \\(([A-Z])\\)', a.text.strip())\n name, party = match.group(1), self._parties[match.group(2)]\n\n url = a.get('href')\n\n leg = Legislator(term, 'upper', district, name.strip(), party=party, url=url)\n leg.add_source(url)\n self.scrape_upper_offices(leg, url)\n self.save_legislator(leg)\n\n\n\n def scrape_upper_offices(self, legislator, url):\n url = url.replace('aspx', 'html')\n html = self.get(url).text\n legislator.add_source(url)\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n xpath = '//h3[contains(., \"Office\")]'\n for table in doc.xpath(xpath)[0].itersiblings():\n if table.tag == 'table':\n break\n col1, col2 = table.xpath('tr[2]/td')\n lxml.etree.strip_tags(col1, 'sup')\n lxml.etree.strip_tags(col2, 'sup')\n\n capitol_office_info = self._clean_office_info(col1)\n\n # Set email on the leg object.\n if '@' in capitol_office_info[-1]:\n email = capitol_office_info.pop()\n legislator['email'] = email\n else:\n email = None\n\n capitol_phone = self._extract_phone(capitol_office_info)\n\n capitol_address_lines = map(\n lambda line: line.strip(),\n filter(\n lambda string: re.search(r', OK|Lincoln Blvd|Room \\d', string),\n capitol_office_info))\n\n office = dict(\n name='Capitol Office',\n type='capitol',\n address='\\n'.join(capitol_address_lines),\n fax=None,\n email=email,\n phone=capitol_phone)\n\n legislator.add_office(**office)\n\n district_office_info = self._clean_office_info(col2)\n\n # This probably isn't a valid district office at less than two lines.\n if len(district_office_info) < 2:\n return\n\n district_address_lines = []\n for line in district_office_info:\n district_address_lines.append(line.strip())\n if 'OK' in line:\n break\n\n if 'OK' in district_address_lines[-1]:\n district_address = '\\n'.join(filter(lambda line: line,\n district_address_lines))\n else:\n district_address = None\n #self.logger.debug(district_address)\n\n district_phone = self._extract_phone(district_office_info)\n\n office = dict(\n name='District Office',\n type='district',\n address=district_address,\n fax=None,\n 
email=None,\n phone=district_phone)\n\n legislator.add_office(**office)\n", "path": "openstates/ok/legislators.py"}], "after_files": [{"content": "import re\nimport lxml\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin, validate_email_address\n\n\nclass OKLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'ok'\n latest_only = True\n\n _parties = {'R': 'Republican', 'D': 'Democratic', 'I': 'Independent'}\n\n def _scrub(self, text):\n \"\"\"Squish whitespace and kill \\xa0.\"\"\"\n return re.sub(r'[\\s\\xa0]+', ' ', text)\n\n def _clean_office_info(self, office_info):\n office_info = map(self._scrub, office_info.itertext())\n # Throw away anything after any email address, phone number, or\n # address lines.\n while office_info:\n last = office_info[-1]\n if ('@' not in last\n and ', OK' not in last\n and not re.search(r'[\\d\\-\\(\\) ]{7,}', last)):\n office_info.pop()\n else:\n break\n return office_info\n\n def _extract_phone(self, office_info):\n phone = None\n\n for line in office_info:\n phone_match = re.search(r'''(\\(\\d{3}\\) \\d{3}-\\d{4}|\n \\d{3}.\\d{3}.\\d{4})''', line)\n if phone_match is not None:\n phone = phone_match.group(1).strip()\n\n return phone\n\n def _extract_email(self, doc):\n xpath = '//div[@class=\"districtheadleft\"]' \\\n + '/b[contains(text(), \"Email:\")]' \\\n + '/../following-sibling::div' \\\n + '/script/text()'\n script = doc.xpath(xpath)\n if not script:\n return ''\n script = script[0]\n line = filter(\n lambda line: '+ \"@\" +' in line,\n script.split('\\r\\n'))[0]\n parts = re.findall(r'\"(.+?)\"', line)\n\n email = ''.join(parts)\n\n return email if validate_email_address(email) else ''\n\n def scrape(self, chamber, term):\n getattr(self, 'scrape_' + chamber + '_chamber')(term)\n\n def scrape_lower_chamber(self, term):\n url = \"http://www.okhouse.gov/Members/Default.aspx\"\n\n page = self.lxmlize(url)\n\n legislator_nodes = self.get_nodes(\n page,\n '//table[@id=\"ctl00_ContentPlaceHolder1_RadGrid1_ctl00\"]/tbody/tr')\n\n for legislator_node in legislator_nodes:\n name_node = self.get_node(\n legislator_node,\n './/td[1]/a')\n\n if name_node is not None:\n name_text = name_node.text.strip()\n\n # Handle seats with no current representative\n if re.search(r'District \\d+', name_text):\n continue\n\n last_name, delimiter, first_name = name_text.partition(',')\n\n if last_name is not None and first_name is not None:\n first_name = first_name.strip()\n last_name = last_name.strip()\n name = ' '.join([first_name, last_name])\n else:\n raise ValueError('Unable to parse name: {}'.format(\n name_text))\n\n if name.startswith('House District'):\n continue\n\n district_node = self.get_node(\n legislator_node,\n './/td[3]')\n\n if district_node is not None:\n district = district_node.text.strip()\n\n party_node = self.get_node(\n legislator_node,\n './/td[4]')\n\n if party_node is not None:\n party_text = party_node.text.strip()\n\n party = self._parties[party_text]\n\n legislator_url = 'http://www.okhouse.gov/District.aspx?District=' + district\n\n legislator_page = self.lxmlize(legislator_url)\n\n photo_url = self.get_node(\n legislator_page,\n '//a[@id=\"ctl00_ContentPlaceHolder1_imgHiRes\"]/@href')\n\n legislator = Legislator(\n _scraped_name=name_text,\n full_name=name,\n term=term,\n chamber='lower',\n district=district,\n party=party,\n photo_url=photo_url,\n url=legislator_url\n )\n\n legislator.add_source(url)\n legislator.add_source(legislator_url)\n\n # Scrape offices.\n 
self.scrape_lower_offices(legislator_page, legislator)\n\n self.save_legislator(legislator)\n\n def scrape_lower_offices(self, doc, legislator):\n\n # Capitol offices:\n xpath = '//*[contains(text(), \"Capitol Address\")]'\n for bold in doc.xpath(xpath):\n\n # Get the address.\n address_div = bold.getparent().itersiblings().next()\n\n # Get the room number.\n xpath = '//*[contains(@id, \"CapitolRoom\")]/text()'\n room = address_div.xpath(xpath)\n if room:\n parts = map(self._scrub, list(address_div.itertext()))\n parts = [x.strip() for x in parts if x.strip()]\n phone = parts.pop()\n parts = [parts[0], 'Room ' + room[0], parts[-1]]\n address = '\\n'.join(parts)\n else:\n address = None\n phone = None\n\n if not phone:\n phone = None\n\n # Get the email address, extracted from a series of JS\n # \"document.write\" lines.\n email = self._extract_email(doc)\n\n office = dict(name='Capitol Office', type='capitol', phone=phone,\n address=address)\n if email:\n legislator['email'] = email\n office['email'] = email\n\n legislator.add_office(**office)\n\n # District offices only have address, no other information\n district_address = doc.xpath('//span[@id=\"ctl00_ContentPlaceHolder1_lblDistrictAddress\"]/text()')\n if district_address:\n district_city_state, = doc.xpath('//span[@id=\"ctl00_ContentPlaceHolder1_lblDistrictCity\"]/text()')\n district_address = \"{}\\n{}\".format(district_address[0], district_city_state)\n\n office = dict(name='District Office', type='district', address=district_address)\n legislator.add_office(**office)\n\n def scrape_upper_chamber(self, term):\n url = \"http://oksenate.gov/Senators/Default.aspx\"\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n for a in doc.xpath('//table[@summary]')[0].xpath('.//td//a[contains(@href, \"biographies\")]'):\n tail = a.xpath('..')[0].tail\n if tail:\n district = tail.split()[1]\n else:\n district = a.xpath('../../span')[1].text.split()[1]\n\n if a.text is None:\n self.warning(\"District {} appears to be empty\".format(district))\n continue\n else:\n match = re.match(r'(.+) \\(([A-Z])\\)', a.text.strip())\n name, party = match.group(1), self._parties[match.group(2)]\n\n url = a.get('href')\n\n leg = Legislator(term, 'upper', district, name.strip(), party=party, url=url)\n leg.add_source(url)\n self.scrape_upper_offices(leg, url)\n self.save_legislator(leg)\n\n def scrape_upper_offices(self, legislator, url):\n url = url.replace('aspx', 'html')\n html = self.get(url).text\n legislator.add_source(url)\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n xpath = '//h3[contains(., \"Office\")]'\n for table in doc.xpath(xpath)[0].itersiblings():\n if table.tag == 'table':\n break\n col1, col2 = table.xpath('tr[2]/td')\n lxml.etree.strip_tags(col1, 'sup')\n lxml.etree.strip_tags(col2, 'sup')\n\n capitol_office_info = self._clean_office_info(col1)\n\n # Set email on the leg object.\n if '@' in capitol_office_info[-1]:\n email = capitol_office_info.pop()\n legislator['email'] = email\n else:\n email = None\n\n capitol_phone = self._extract_phone(capitol_office_info)\n\n capitol_address_lines = map(\n lambda line: line.strip(),\n filter(\n lambda string: re.search(r', OK|Lincoln Blvd|Room \\d', string),\n capitol_office_info))\n\n office = dict(\n name='Capitol Office',\n type='capitol',\n address='\\n'.join(capitol_address_lines),\n fax=None,\n email=email,\n phone=capitol_phone)\n\n legislator.add_office(**office)\n\n district_office_info = self._clean_office_info(col2)\n\n # 
This probably isn't a valid district office at less than two lines.\n if len(district_office_info) < 2:\n return\n\n district_address_lines = []\n for line in district_office_info:\n district_address_lines.append(line.strip())\n if 'OK' in line:\n break\n\n if 'OK' in district_address_lines[-1]:\n district_address = '\\n'.join(filter(lambda line: line,\n district_address_lines))\n else:\n district_address = None\n # self.logger.debug(district_address)\n\n district_phone = self._extract_phone(district_office_info)\n\n office = dict(\n name='District Office',\n type='district',\n address=district_address,\n fax=None,\n email=None,\n phone=district_phone)\n\n legislator.add_office(**office)\n", "path": "openstates/ok/legislators.py"}]} | 3,421 | 668 |
gh_patches_debug_24038 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2840 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add North Sea Link (GB<->NO-NO2)
National Grid and Statnett are building an interconnector ([1400MW](https://northsealink.com/)) between Great Britain and [South-West Norway](https://northsealink.com/en/locations/norway/) (should be NO-NO2 based on physical location).
Installation of the subsea cables is only [planned](https://northsealink.com/en/the-project/project-timeline/) to be finished in 2021, but apparently the interconnector has already been added to ELEXON and has possibly [broken](/tmrowco/electricitymap-contrib/pull/2828) that [parser](/tmrowco/electricitymap-contrib/issues/2829).
So, it seems to be possible to add the exchange to ElectricityMap. I can't find it in ENTSO-E yet though, so if we want to use that one instead of ELEXON we'll have to wait.
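A minimal sketch of what the parser change might look like, extending the `EXCHANGES` mapping in `parsers/ELEXON.py` (shown below); the INTERFUELHH column index used here is a placeholder only, and the report's `expected_fields` count would also need to be confirmed against the actual CSV layout:

```python
# Sketch only: the key follows the parser's sorted 'zoneA->zoneB' convention;
# the column index (8) is a placeholder, not a confirmed INTERFUELHH field.
EXCHANGES = {
    'FR->GB': 3,
    'GB-NIR->IE': 4,
    'GB->NL': 5,
    'GB->IE': 6,
    'BE->GB': 7,
    'GB->NO-NO2': 8,  # North Sea Link (placeholder index)
}
```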
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/ELEXON.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding=utf-8
3
4 """
5 Parser that uses the ELEXON API to return the following data types.
6
7 Production
8 Exchanges
9
10 Documentation:
11 https://www.elexon.co.uk/wp-content/uploads/2017/06/
12 bmrs_api_data_push_user_guide_v1.1.pdf
13 """
14
15 import os
16 import arrow
17 import logging
18 import requests
19 import datetime as dt
20 import pandas as pd
21 from io import StringIO
22
23 from .lib.validation import validate
24
25 ELEXON_ENDPOINT = 'https://api.bmreports.com/BMRS/{}/v1'
26
27 REPORT_META = {
28 'B1620': {
29 'expected_fields': 13,
30 'skiprows': 5
31 },
32 'FUELINST': {
33 'expected_fields': 22,
34 'skiprows': 1
35 },
36 'INTERFUELHH': {
37 'expected_fields': 8,
38 'skiprows': 0
39 }
40 }
41
42 # 'hydro' key is for hydro production
43 # 'hydro storage' key is for hydro storage
44 RESOURCE_TYPE_TO_FUEL = {
45 'Biomass': 'biomass',
46 'Fossil Gas': 'gas',
47 'Fossil Hard coal': 'coal',
48 'Fossil Oil': 'oil',
49 'Hydro Pumped Storage': 'hydro storage',
50 'Hydro Run-of-river and poundage': 'hydro',
51 'Nuclear': 'nuclear',
52 'Solar': 'solar',
53 'Wind Onshore': 'wind',
54 'Wind Offshore': 'wind',
55 'Other': 'unknown'
56 }
57
58 EXCHANGES = {
59 'FR->GB': 3,
60 'GB-NIR->IE': 4,
61 'GB->NL': 5,
62 'GB->IE': 6,
63 'BE->GB': 7
64 }
65
66 FETCH_WIND_FROM_FUELINST = True
67
68
69 def query_ELEXON(report, session, params):
70 if 'ELEXON_TOKEN' not in os.environ:
71 raise Exception('No ELEXON_TOKEN found! Please add it to secrets.env!')
72 params['APIKey'] = os.environ['ELEXON_TOKEN']
73 return session.get(ELEXON_ENDPOINT.format(report), params=params)
74
75
76 def query_exchange(session, target_datetime=None):
77 if target_datetime is None:
78 target_datetime = dt.date.today()
79
80 from_date = (target_datetime - dt.timedelta(days=1)).strftime('%Y-%m-%d')
81 to_date = target_datetime.strftime('%Y-%m-%d')
82
83 params = {
84 'FromDate': from_date,
85 'ToDate': to_date,
86 'ServiceType': 'csv'
87 }
88 response = query_ELEXON('INTERFUELHH', session, params)
89 return response.text
90
91
92 def query_production(session, target_datetime=None):
93 if target_datetime is None:
94 target_datetime = dt.datetime.now()
95
96 # we can only fetch one date at a time.
97 # if target_datetime is first 30 minutes of the day fetch the day before.
98 # otherwise fetch the day of target_datetime.
99 if target_datetime.time() <= dt.time(0, 30):
100 settlement_date = target_datetime.date() - dt.timedelta(1)
101 else:
102 settlement_date = target_datetime.date()
103
104 params = {
105 'SettlementDate': settlement_date.strftime('%Y-%m-%d'),
106 'Period': '*',
107 'ServiceType': 'csv'
108 }
109 response = query_ELEXON('B1620', session, params)
110 return response.text
111
112
113 def parse_exchange(zone_key1, zone_key2, csv_text, target_datetime=None,
114 logger=logging.getLogger(__name__)):
115 if not csv_text:
116 return None
117
118 report = REPORT_META['INTERFUELHH']
119
120 sorted_zone_keys = sorted([zone_key1, zone_key2])
121 exchange = '->'.join(sorted_zone_keys)
122 data_points = list()
123 lines = csv_text.split('\n')
124
125 # check field count in report is as expected
126 field_count = len(lines[1].split(','))
127 if field_count != report['expected_fields']:
128 raise ValueError(
129 'Expected {} fields in INTERFUELHH report, got {}'.format(
130 report['expected_fields'], field_count))
131
132 for line in lines[1:-1]:
133 fields = line.split(',')
134
135 # settlement date / period combinations are always local time
136 date = dt.datetime.strptime(fields[1], '%Y%m%d').date()
137 settlement_period = int(fields[2])
138 datetime = datetime_from_date_sp(date, settlement_period)
139
140 data = {
141 'sortedZoneKeys': exchange,
142 'datetime': datetime,
143 'source': 'bmreports.com'
144 }
145
146 # positive value implies import to GB
147 multiplier = -1 if 'GB' in sorted_zone_keys[0] else 1
148 data['netFlow'] = float(fields[EXCHANGES[exchange]]) * multiplier
149 data_points.append(data)
150
151 return data_points
152
153
154 def parse_production(csv_text, target_datetime=None,
155 logger=logging.getLogger(__name__)):
156 if not csv_text:
157 return None
158
159 report = REPORT_META['B1620']
160
161 # create DataFrame from slice of CSV rows
162 df = pd.read_csv(StringIO(csv_text), skiprows=report['skiprows'] - 1)
163
164 # check field count in report is as expected
165 field_count = len(df.columns)
166 if field_count != report['expected_fields']:
167 raise ValueError(
168 'Expected {} fields in B1620 report, got {}'.format(
169 report['expected_fields'], len(df.columns)))
170
171 # filter out undesired columns
172 df = df.iloc[:-1, [7, 8, 9, 4]]
173
174 df['Settlement Date'] = df['Settlement Date'].apply(
175 lambda x: dt.datetime.strptime(x, '%Y-%m-%d'))
176 df['Settlement Period'] = df['Settlement Period'].astype(int)
177 df['datetime'] = df.apply(lambda x: datetime_from_date_sp(
178 x['Settlement Date'], x['Settlement Period']), axis=1)
179
180 # map from report fuel names to electricitymap fuel names
181 fuel_column = 'Power System Resource Type'
182 df[fuel_column] = df[fuel_column].apply(lambda x: RESOURCE_TYPE_TO_FUEL[x])
183
184 # loop through unique datetimes and create each data point
185 data_points = list()
186 for time in pd.unique(df['datetime']):
187 time_df = df[df['datetime'] == time]
188
189 data_point = {
190 'zoneKey': 'GB',
191 'datetime': time.to_pydatetime(),
192 'source': 'bmreports.com',
193 'production': dict(),
194 'storage': dict()
195 }
196
197 for row in time_df.iterrows():
198 fields = row[1].to_dict()
199 fuel = fields[fuel_column]
200 quantity = fields['Quantity']
201
202 # check if storage value and if so correct key
203 if 'storage' in fuel:
204 fuel_key = fuel.replace('storage', '').strip()
205 # ELEXON storage is negative when storing and positive when
206 # discharging (the opposite to electricitymap)
207 data_point['storage'][fuel_key] = quantity * -1
208 else:
209 # if/else structure allows summation of multiple quantities
210 # e.g. 'Wind Onshore' and 'Wind Offshore' both have the
211 # key 'wind' here.
212 if fuel in data_point['production'].keys():
213 data_point['production'][fuel] += quantity
214 else:
215 data_point['production'][fuel] = quantity
216
217 data_points.append(data_point)
218
219 return data_points
220
221
222 def datetime_from_date_sp(date, sp):
223 datetime = arrow.get(date).shift(minutes=30 * (sp - 1))
224 return datetime.replace(tzinfo='Europe/London').datetime
225
226
227 def _fetch_wind(target_datetime=None):
228 if target_datetime is None:
229 target_datetime = dt.datetime.now()
230
231 # line up with B1620 (main production report) search range
232 d = target_datetime.date()
233 start = d - dt.timedelta(hours=24)
234 end = dt.datetime.combine(d + dt.timedelta(days=1), dt.time(0))
235
236 session = requests.session()
237 params = {
238 'FromDateTime': start.strftime('%Y-%m-%d %H:%M:%S'),
239 'ToDateTime': end.strftime('%Y-%m-%d %H:%M:%S'),
240 'ServiceType': 'csv'
241 }
242 response = query_ELEXON('FUELINST', session, params)
243 csv_text = response.text
244
245 report = REPORT_META['FUELINST']
246 df = pd.read_csv(StringIO(csv_text), skiprows=report['skiprows'],
247 skipfooter=1, header=None)
248
249 field_count = len(df.columns)
250 if field_count != report['expected_fields']:
251 raise ValueError(
252 'Expected {} fields in FUELINST report, got {}'.format(
253 report['expected_fields'], len(df.columns)))
254
255 df = df.iloc[:, [1, 2, 3, 8]]
256 df.columns = ['Settlement Date', 'Settlement Period', 'published', 'Wind']
257 df['Settlement Date'] = df['Settlement Date'].apply(
258 lambda x: dt.datetime.strptime(str(x), '%Y%m%d'))
259 df['Settlement Period'] = df['Settlement Period'].astype(int)
260 df['datetime'] = df.apply(lambda x: datetime_from_date_sp(
261 x['Settlement Date'], x['Settlement Period']), axis=1)
262
263 df['published'] = df['published'].apply(
264 lambda x: dt.datetime.strptime(str(x), '%Y%m%d%H%M%S'))
265 # get the most recently published value for each datetime
266 idx = df.groupby('datetime')['published'].transform(max) == df['published']
267 df = df[idx]
268
269 return df[['datetime', 'Wind']]
270
271
272 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None,
273 logger=logging.getLogger(__name__)):
274 session = session or requests.session()
275 response = query_exchange(session, target_datetime)
276 data = parse_exchange(zone_key1, zone_key2, response, target_datetime,
277 logger)
278 return data
279
280
281 def fetch_production(zone_key='GB', session=None, target_datetime=None,
282 logger=logging.getLogger(__name__)):
283 session = session or requests.session()
284 response = query_production(session, target_datetime)
285 data = parse_production(response, target_datetime, logger)
286
287 # At times B1620 has had poor quality data for wind so fetch from FUELINST
288 if FETCH_WIND_FROM_FUELINST:
289 wind = _fetch_wind(target_datetime)
290 for entry in data:
291 datetime = entry['datetime']
292 wind_row = wind[wind['datetime'] == datetime]
293 if len(wind_row):
294 entry['production']['wind'] = wind_row.iloc[0]['Wind']
295 else:
296 entry['production']['wind'] = None
297
298 required = ['coal', 'gas', 'nuclear']
299 expected_range = {
300 'coal': (0, 10000),
301 'gas': (100, 30000),
302 'nuclear': (100, 20000)
303 }
304 data = [x for x in data
305 if validate(
306 x, logger, required=required, expected_range=expected_range)]
307
308 return data
309
310
311 if __name__ == '__main__':
312 """Main method, never used by the Electricity Map backend, but handy
313 for testing."""
314
315 print('fetch_production() ->')
316 print(fetch_production())
317
318 print('fetch_exchange(FR, GB) ->')
319 print(fetch_exchange('FR', 'GB'))
320
321 print('fetch_exchange(GB, IE) ->')
322 print(fetch_exchange('GB', 'IE'))
323
324 print('fetch_exchange(GB, NL) ->')
325 print(fetch_exchange('GB', 'NL'))
326
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/ELEXON.py b/parsers/ELEXON.py
--- a/parsers/ELEXON.py
+++ b/parsers/ELEXON.py
@@ -34,7 +34,7 @@
'skiprows': 1
},
'INTERFUELHH': {
- 'expected_fields': 8,
+ 'expected_fields': 11,
'skiprows': 0
}
}
@@ -56,11 +56,12 @@
}
EXCHANGES = {
- 'FR->GB': 3,
- 'GB-NIR->IE': 4,
- 'GB->NL': 5,
- 'GB->IE': 6,
- 'BE->GB': 7
+ 'FR->GB': [3, 8, 9], # IFA, Eleclink, IFA2
+ 'GB->GB-NIR': [4],
+ 'GB->NL': [5],
+ 'GB->IE': [6],
+ 'BE->GB': [7],
+ 'GB->NO-NO2': [10], # North Sea Link
}
FETCH_WIND_FROM_FUELINST = True
@@ -145,7 +146,13 @@
# positive value implies import to GB
multiplier = -1 if 'GB' in sorted_zone_keys[0] else 1
- data['netFlow'] = float(fields[EXCHANGES[exchange]]) * multiplier
+ net_flow = 0.0 # init
+ for column_index in EXCHANGES[exchange]:
+ # read out all columns providing values for this exchange
+ if fields[column_index] == "":
+ continue # no value provided for this exchange
+ net_flow += float(fields[column_index]) * multiplier
+ data['netFlow'] = net_flow
data_points.append(data)
return data_points
| {"golden_diff": "diff --git a/parsers/ELEXON.py b/parsers/ELEXON.py\n--- a/parsers/ELEXON.py\n+++ b/parsers/ELEXON.py\n@@ -34,7 +34,7 @@\n 'skiprows': 1\n },\n 'INTERFUELHH': {\n- 'expected_fields': 8,\n+ 'expected_fields': 11,\n 'skiprows': 0\n }\n }\n@@ -56,11 +56,12 @@\n }\n \n EXCHANGES = {\n- 'FR->GB': 3,\n- 'GB-NIR->IE': 4,\n- 'GB->NL': 5,\n- 'GB->IE': 6,\n- 'BE->GB': 7\n+ 'FR->GB': [3, 8, 9], # IFA, Eleclink, IFA2\n+ 'GB->GB-NIR': [4],\n+ 'GB->NL': [5],\n+ 'GB->IE': [6],\n+ 'BE->GB': [7],\n+ 'GB->NO-NO2': [10], # North Sea Link\n }\n \n FETCH_WIND_FROM_FUELINST = True\n@@ -145,7 +146,13 @@\n \n # positive value implies import to GB\n multiplier = -1 if 'GB' in sorted_zone_keys[0] else 1\n- data['netFlow'] = float(fields[EXCHANGES[exchange]]) * multiplier\n+ net_flow = 0.0 # init\n+ for column_index in EXCHANGES[exchange]:\n+ # read out all columns providing values for this exchange\n+ if fields[column_index] == \"\":\n+ continue # no value provided for this exchange\n+ net_flow += float(fields[column_index]) * multiplier\n+ data['netFlow'] = net_flow\n data_points.append(data)\n \n return data_points\n", "issue": "Add North Sea Link (GB<->NO-NO2)\nNational Grid and Statnett are building an interconnector ([1400MW](https://northsealink.com/)) between Great Britain and [South-West Norway](https://northsealink.com/en/locations/norway/) (should be NO-NO2 based on physical location).\r\n\r\nInstallation of subsea cables is only [planned](https://northsealink.com/en/the-project/project-timeline/) to be finished in 2021, but apparently the interconnector has already been added to ELEXON and possibly [broken](/tmrowco/electricitymap-contrib/pull/2828) that [parser](/tmrowco/electricitymap-contrib/issues/2829).\r\n\r\nSo, it seems to be possible to add the exchange to ElectricityMap. I can't find it in ENTSO-E yet though, so if we want to use that one instead of ELEXON we'll have to wait.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"\nParser that uses the ELEXON API to return the following data types.\n\nProduction\nExchanges\n\nDocumentation:\nhttps://www.elexon.co.uk/wp-content/uploads/2017/06/\nbmrs_api_data_push_user_guide_v1.1.pdf\n\"\"\"\n\nimport os\nimport arrow\nimport logging\nimport requests\nimport datetime as dt\nimport pandas as pd\nfrom io import StringIO\n\nfrom .lib.validation import validate\n\nELEXON_ENDPOINT = 'https://api.bmreports.com/BMRS/{}/v1'\n\nREPORT_META = {\n 'B1620': {\n 'expected_fields': 13,\n 'skiprows': 5\n },\n 'FUELINST': {\n 'expected_fields': 22,\n 'skiprows': 1\n },\n 'INTERFUELHH': {\n 'expected_fields': 8,\n 'skiprows': 0\n }\n}\n\n# 'hydro' key is for hydro production\n# 'hydro storage' key is for hydro storage\nRESOURCE_TYPE_TO_FUEL = {\n 'Biomass': 'biomass',\n 'Fossil Gas': 'gas',\n 'Fossil Hard coal': 'coal',\n 'Fossil Oil': 'oil',\n 'Hydro Pumped Storage': 'hydro storage',\n 'Hydro Run-of-river and poundage': 'hydro',\n 'Nuclear': 'nuclear',\n 'Solar': 'solar',\n 'Wind Onshore': 'wind',\n 'Wind Offshore': 'wind',\n 'Other': 'unknown'\n}\n\nEXCHANGES = {\n 'FR->GB': 3,\n 'GB-NIR->IE': 4,\n 'GB->NL': 5,\n 'GB->IE': 6,\n 'BE->GB': 7\n}\n\nFETCH_WIND_FROM_FUELINST = True\n\n\ndef query_ELEXON(report, session, params):\n if 'ELEXON_TOKEN' not in os.environ:\n raise Exception('No ELEXON_TOKEN found! 
Please add it to secrets.env!')\n params['APIKey'] = os.environ['ELEXON_TOKEN']\n return session.get(ELEXON_ENDPOINT.format(report), params=params)\n\n\ndef query_exchange(session, target_datetime=None):\n if target_datetime is None:\n target_datetime = dt.date.today()\n\n from_date = (target_datetime - dt.timedelta(days=1)).strftime('%Y-%m-%d')\n to_date = target_datetime.strftime('%Y-%m-%d')\n\n params = {\n 'FromDate': from_date,\n 'ToDate': to_date,\n 'ServiceType': 'csv'\n }\n response = query_ELEXON('INTERFUELHH', session, params)\n return response.text\n\n\ndef query_production(session, target_datetime=None):\n if target_datetime is None:\n target_datetime = dt.datetime.now()\n\n # we can only fetch one date at a time.\n # if target_datetime is first 30 minutes of the day fetch the day before.\n # otherwise fetch the day of target_datetime.\n if target_datetime.time() <= dt.time(0, 30):\n settlement_date = target_datetime.date() - dt.timedelta(1)\n else:\n settlement_date = target_datetime.date()\n\n params = {\n 'SettlementDate': settlement_date.strftime('%Y-%m-%d'),\n 'Period': '*',\n 'ServiceType': 'csv'\n }\n response = query_ELEXON('B1620', session, params)\n return response.text\n\n\ndef parse_exchange(zone_key1, zone_key2, csv_text, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if not csv_text:\n return None\n\n report = REPORT_META['INTERFUELHH']\n\n sorted_zone_keys = sorted([zone_key1, zone_key2])\n exchange = '->'.join(sorted_zone_keys)\n data_points = list()\n lines = csv_text.split('\\n')\n\n # check field count in report is as expected\n field_count = len(lines[1].split(','))\n if field_count != report['expected_fields']:\n raise ValueError(\n 'Expected {} fields in INTERFUELHH report, got {}'.format(\n report['expected_fields'], field_count))\n\n for line in lines[1:-1]:\n fields = line.split(',')\n\n # settlement date / period combinations are always local time\n date = dt.datetime.strptime(fields[1], '%Y%m%d').date()\n settlement_period = int(fields[2])\n datetime = datetime_from_date_sp(date, settlement_period)\n\n data = {\n 'sortedZoneKeys': exchange,\n 'datetime': datetime,\n 'source': 'bmreports.com'\n }\n\n # positive value implies import to GB\n multiplier = -1 if 'GB' in sorted_zone_keys[0] else 1\n data['netFlow'] = float(fields[EXCHANGES[exchange]]) * multiplier\n data_points.append(data)\n\n return data_points\n\n\ndef parse_production(csv_text, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if not csv_text:\n return None\n\n report = REPORT_META['B1620']\n\n # create DataFrame from slice of CSV rows\n df = pd.read_csv(StringIO(csv_text), skiprows=report['skiprows'] - 1)\n\n # check field count in report is as expected\n field_count = len(df.columns)\n if field_count != report['expected_fields']:\n raise ValueError(\n 'Expected {} fields in B1620 report, got {}'.format(\n report['expected_fields'], len(df.columns)))\n\n # filter out undesired columns\n df = df.iloc[:-1, [7, 8, 9, 4]]\n\n df['Settlement Date'] = df['Settlement Date'].apply(\n lambda x: dt.datetime.strptime(x, '%Y-%m-%d'))\n df['Settlement Period'] = df['Settlement Period'].astype(int)\n df['datetime'] = df.apply(lambda x: datetime_from_date_sp(\n x['Settlement Date'], x['Settlement Period']), axis=1)\n\n # map from report fuel names to electricitymap fuel names\n fuel_column = 'Power System Resource Type'\n df[fuel_column] = df[fuel_column].apply(lambda x: RESOURCE_TYPE_TO_FUEL[x])\n\n # loop through unique datetimes and create each data point\n data_points 
= list()\n for time in pd.unique(df['datetime']):\n time_df = df[df['datetime'] == time]\n\n data_point = {\n 'zoneKey': 'GB',\n 'datetime': time.to_pydatetime(),\n 'source': 'bmreports.com',\n 'production': dict(),\n 'storage': dict()\n }\n\n for row in time_df.iterrows():\n fields = row[1].to_dict()\n fuel = fields[fuel_column]\n quantity = fields['Quantity']\n\n # check if storage value and if so correct key\n if 'storage' in fuel:\n fuel_key = fuel.replace('storage', '').strip()\n # ELEXON storage is negative when storing and positive when\n # discharging (the opposite to electricitymap)\n data_point['storage'][fuel_key] = quantity * -1\n else:\n # if/else structure allows summation of multiple quantities\n # e.g. 'Wind Onshore' and 'Wind Offshore' both have the\n # key 'wind' here.\n if fuel in data_point['production'].keys():\n data_point['production'][fuel] += quantity\n else:\n data_point['production'][fuel] = quantity\n\n data_points.append(data_point)\n\n return data_points\n\n\ndef datetime_from_date_sp(date, sp):\n datetime = arrow.get(date).shift(minutes=30 * (sp - 1))\n return datetime.replace(tzinfo='Europe/London').datetime\n\n\ndef _fetch_wind(target_datetime=None):\n if target_datetime is None:\n target_datetime = dt.datetime.now()\n\n # line up with B1620 (main production report) search range\n d = target_datetime.date()\n start = d - dt.timedelta(hours=24)\n end = dt.datetime.combine(d + dt.timedelta(days=1), dt.time(0))\n\n session = requests.session()\n params = {\n 'FromDateTime': start.strftime('%Y-%m-%d %H:%M:%S'),\n 'ToDateTime': end.strftime('%Y-%m-%d %H:%M:%S'),\n 'ServiceType': 'csv'\n }\n response = query_ELEXON('FUELINST', session, params)\n csv_text = response.text\n\n report = REPORT_META['FUELINST']\n df = pd.read_csv(StringIO(csv_text), skiprows=report['skiprows'],\n skipfooter=1, header=None)\n\n field_count = len(df.columns)\n if field_count != report['expected_fields']:\n raise ValueError(\n 'Expected {} fields in FUELINST report, got {}'.format(\n report['expected_fields'], len(df.columns)))\n\n df = df.iloc[:, [1, 2, 3, 8]]\n df.columns = ['Settlement Date', 'Settlement Period', 'published', 'Wind']\n df['Settlement Date'] = df['Settlement Date'].apply(\n lambda x: dt.datetime.strptime(str(x), '%Y%m%d'))\n df['Settlement Period'] = df['Settlement Period'].astype(int)\n df['datetime'] = df.apply(lambda x: datetime_from_date_sp(\n x['Settlement Date'], x['Settlement Period']), axis=1)\n\n df['published'] = df['published'].apply(\n lambda x: dt.datetime.strptime(str(x), '%Y%m%d%H%M%S'))\n # get the most recently published value for each datetime\n idx = df.groupby('datetime')['published'].transform(max) == df['published']\n df = df[idx]\n\n return df[['datetime', 'Wind']]\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n session = session or requests.session()\n response = query_exchange(session, target_datetime)\n data = parse_exchange(zone_key1, zone_key2, response, target_datetime,\n logger)\n return data\n\n\ndef fetch_production(zone_key='GB', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n session = session or requests.session()\n response = query_production(session, target_datetime)\n data = parse_production(response, target_datetime, logger)\n\n # At times B1620 has had poor quality data for wind so fetch from FUELINST\n if FETCH_WIND_FROM_FUELINST:\n wind = _fetch_wind(target_datetime)\n for entry in data:\n datetime = entry['datetime']\n 
wind_row = wind[wind['datetime'] == datetime]\n if len(wind_row):\n entry['production']['wind'] = wind_row.iloc[0]['Wind']\n else:\n entry['production']['wind'] = None\n\n required = ['coal', 'gas', 'nuclear']\n expected_range = {\n 'coal': (0, 10000),\n 'gas': (100, 30000),\n 'nuclear': (100, 20000)\n }\n data = [x for x in data\n if validate(\n x, logger, required=required, expected_range=expected_range)]\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy \n for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n\n print('fetch_exchange(FR, GB) ->')\n print(fetch_exchange('FR', 'GB'))\n\n print('fetch_exchange(GB, IE) ->')\n print(fetch_exchange('GB', 'IE'))\n\n print('fetch_exchange(GB, NL) ->')\n print(fetch_exchange('GB', 'NL'))\n", "path": "parsers/ELEXON.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"\nParser that uses the ELEXON API to return the following data types.\n\nProduction\nExchanges\n\nDocumentation:\nhttps://www.elexon.co.uk/wp-content/uploads/2017/06/\nbmrs_api_data_push_user_guide_v1.1.pdf\n\"\"\"\n\nimport os\nimport arrow\nimport logging\nimport requests\nimport datetime as dt\nimport pandas as pd\nfrom io import StringIO\n\nfrom .lib.validation import validate\n\nELEXON_ENDPOINT = 'https://api.bmreports.com/BMRS/{}/v1'\n\nREPORT_META = {\n 'B1620': {\n 'expected_fields': 13,\n 'skiprows': 5\n },\n 'FUELINST': {\n 'expected_fields': 22,\n 'skiprows': 1\n },\n 'INTERFUELHH': {\n 'expected_fields': 11,\n 'skiprows': 0\n }\n}\n\n# 'hydro' key is for hydro production\n# 'hydro storage' key is for hydro storage\nRESOURCE_TYPE_TO_FUEL = {\n 'Biomass': 'biomass',\n 'Fossil Gas': 'gas',\n 'Fossil Hard coal': 'coal',\n 'Fossil Oil': 'oil',\n 'Hydro Pumped Storage': 'hydro storage',\n 'Hydro Run-of-river and poundage': 'hydro',\n 'Nuclear': 'nuclear',\n 'Solar': 'solar',\n 'Wind Onshore': 'wind',\n 'Wind Offshore': 'wind',\n 'Other': 'unknown'\n}\n\nEXCHANGES = {\n 'FR->GB': [3, 8, 9], # IFA, Eleclink, IFA2\n 'GB->GB-NIR': [4],\n 'GB->NL': [5],\n 'GB->IE': [6],\n 'BE->GB': [7],\n 'GB->NO-NO2': [10], # North Sea Link\n}\n\nFETCH_WIND_FROM_FUELINST = True\n\n\ndef query_ELEXON(report, session, params):\n if 'ELEXON_TOKEN' not in os.environ:\n raise Exception('No ELEXON_TOKEN found! 
Please add it to secrets.env!')\n params['APIKey'] = os.environ['ELEXON_TOKEN']\n return session.get(ELEXON_ENDPOINT.format(report), params=params)\n\n\ndef query_exchange(session, target_datetime=None):\n if target_datetime is None:\n target_datetime = dt.date.today()\n\n from_date = (target_datetime - dt.timedelta(days=1)).strftime('%Y-%m-%d')\n to_date = target_datetime.strftime('%Y-%m-%d')\n\n params = {\n 'FromDate': from_date,\n 'ToDate': to_date,\n 'ServiceType': 'csv'\n }\n response = query_ELEXON('INTERFUELHH', session, params)\n return response.text\n\n\ndef query_production(session, target_datetime=None):\n if target_datetime is None:\n target_datetime = dt.datetime.now()\n\n # we can only fetch one date at a time.\n # if target_datetime is first 30 minutes of the day fetch the day before.\n # otherwise fetch the day of target_datetime.\n if target_datetime.time() <= dt.time(0, 30):\n settlement_date = target_datetime.date() - dt.timedelta(1)\n else:\n settlement_date = target_datetime.date()\n\n params = {\n 'SettlementDate': settlement_date.strftime('%Y-%m-%d'),\n 'Period': '*',\n 'ServiceType': 'csv'\n }\n response = query_ELEXON('B1620', session, params)\n return response.text\n\n\ndef parse_exchange(zone_key1, zone_key2, csv_text, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if not csv_text:\n return None\n\n report = REPORT_META['INTERFUELHH']\n\n sorted_zone_keys = sorted([zone_key1, zone_key2])\n exchange = '->'.join(sorted_zone_keys)\n data_points = list()\n lines = csv_text.split('\\n')\n\n # check field count in report is as expected\n field_count = len(lines[1].split(','))\n if field_count != report['expected_fields']:\n raise ValueError(\n 'Expected {} fields in INTERFUELHH report, got {}'.format(\n report['expected_fields'], field_count))\n\n for line in lines[1:-1]:\n fields = line.split(',')\n\n # settlement date / period combinations are always local time\n date = dt.datetime.strptime(fields[1], '%Y%m%d').date()\n settlement_period = int(fields[2])\n datetime = datetime_from_date_sp(date, settlement_period)\n\n data = {\n 'sortedZoneKeys': exchange,\n 'datetime': datetime,\n 'source': 'bmreports.com'\n }\n\n # positive value implies import to GB\n multiplier = -1 if 'GB' in sorted_zone_keys[0] else 1\n net_flow = 0.0 # init\n for column_index in EXCHANGES[exchange]:\n # read out all columns providing values for this exchange\n if fields[column_index] == \"\":\n continue # no value provided for this exchange\n net_flow += float(fields[column_index]) * multiplier\n data['netFlow'] = net_flow\n data_points.append(data)\n\n return data_points\n\n\ndef parse_production(csv_text, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if not csv_text:\n return None\n\n report = REPORT_META['B1620']\n\n # create DataFrame from slice of CSV rows\n df = pd.read_csv(StringIO(csv_text), skiprows=report['skiprows'] - 1)\n\n # check field count in report is as expected\n field_count = len(df.columns)\n if field_count != report['expected_fields']:\n raise ValueError(\n 'Expected {} fields in B1620 report, got {}'.format(\n report['expected_fields'], len(df.columns)))\n\n # filter out undesired columns\n df = df.iloc[:-1, [7, 8, 9, 4]]\n\n df['Settlement Date'] = df['Settlement Date'].apply(\n lambda x: dt.datetime.strptime(x, '%Y-%m-%d'))\n df['Settlement Period'] = df['Settlement Period'].astype(int)\n df['datetime'] = df.apply(lambda x: datetime_from_date_sp(\n x['Settlement Date'], x['Settlement Period']), axis=1)\n\n # map from report fuel names 
to electricitymap fuel names\n fuel_column = 'Power System Resource Type'\n df[fuel_column] = df[fuel_column].apply(lambda x: RESOURCE_TYPE_TO_FUEL[x])\n\n # loop through unique datetimes and create each data point\n data_points = list()\n for time in pd.unique(df['datetime']):\n time_df = df[df['datetime'] == time]\n\n data_point = {\n 'zoneKey': 'GB',\n 'datetime': time.to_pydatetime(),\n 'source': 'bmreports.com',\n 'production': dict(),\n 'storage': dict()\n }\n\n for row in time_df.iterrows():\n fields = row[1].to_dict()\n fuel = fields[fuel_column]\n quantity = fields['Quantity']\n\n # check if storage value and if so correct key\n if 'storage' in fuel:\n fuel_key = fuel.replace('storage', '').strip()\n # ELEXON storage is negative when storing and positive when\n # discharging (the opposite to electricitymap)\n data_point['storage'][fuel_key] = quantity * -1\n else:\n # if/else structure allows summation of multiple quantities\n # e.g. 'Wind Onshore' and 'Wind Offshore' both have the\n # key 'wind' here.\n if fuel in data_point['production'].keys():\n data_point['production'][fuel] += quantity\n else:\n data_point['production'][fuel] = quantity\n\n data_points.append(data_point)\n\n return data_points\n\n\ndef datetime_from_date_sp(date, sp):\n datetime = arrow.get(date).shift(minutes=30 * (sp - 1))\n return datetime.replace(tzinfo='Europe/London').datetime\n\n\ndef _fetch_wind(target_datetime=None):\n if target_datetime is None:\n target_datetime = dt.datetime.now()\n\n # line up with B1620 (main production report) search range\n d = target_datetime.date()\n start = d - dt.timedelta(hours=24)\n end = dt.datetime.combine(d + dt.timedelta(days=1), dt.time(0))\n\n session = requests.session()\n params = {\n 'FromDateTime': start.strftime('%Y-%m-%d %H:%M:%S'),\n 'ToDateTime': end.strftime('%Y-%m-%d %H:%M:%S'),\n 'ServiceType': 'csv'\n }\n response = query_ELEXON('FUELINST', session, params)\n csv_text = response.text\n\n report = REPORT_META['FUELINST']\n df = pd.read_csv(StringIO(csv_text), skiprows=report['skiprows'],\n skipfooter=1, header=None)\n\n field_count = len(df.columns)\n if field_count != report['expected_fields']:\n raise ValueError(\n 'Expected {} fields in FUELINST report, got {}'.format(\n report['expected_fields'], len(df.columns)))\n\n df = df.iloc[:, [1, 2, 3, 8]]\n df.columns = ['Settlement Date', 'Settlement Period', 'published', 'Wind']\n df['Settlement Date'] = df['Settlement Date'].apply(\n lambda x: dt.datetime.strptime(str(x), '%Y%m%d'))\n df['Settlement Period'] = df['Settlement Period'].astype(int)\n df['datetime'] = df.apply(lambda x: datetime_from_date_sp(\n x['Settlement Date'], x['Settlement Period']), axis=1)\n\n df['published'] = df['published'].apply(\n lambda x: dt.datetime.strptime(str(x), '%Y%m%d%H%M%S'))\n # get the most recently published value for each datetime\n idx = df.groupby('datetime')['published'].transform(max) == df['published']\n df = df[idx]\n\n return df[['datetime', 'Wind']]\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n session = session or requests.session()\n response = query_exchange(session, target_datetime)\n data = parse_exchange(zone_key1, zone_key2, response, target_datetime,\n logger)\n return data\n\n\ndef fetch_production(zone_key='GB', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n session = session or requests.session()\n response = query_production(session, target_datetime)\n data = parse_production(response, 
target_datetime, logger)\n\n # At times B1620 has had poor quality data for wind so fetch from FUELINST\n if FETCH_WIND_FROM_FUELINST:\n wind = _fetch_wind(target_datetime)\n for entry in data:\n datetime = entry['datetime']\n wind_row = wind[wind['datetime'] == datetime]\n if len(wind_row):\n entry['production']['wind'] = wind_row.iloc[0]['Wind']\n else:\n entry['production']['wind'] = None\n\n required = ['coal', 'gas', 'nuclear']\n expected_range = {\n 'coal': (0, 10000),\n 'gas': (100, 30000),\n 'nuclear': (100, 20000)\n }\n data = [x for x in data\n if validate(\n x, logger, required=required, expected_range=expected_range)]\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy \n for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n\n print('fetch_exchange(FR, GB) ->')\n print(fetch_exchange('FR', 'GB'))\n\n print('fetch_exchange(GB, IE) ->')\n print(fetch_exchange('GB', 'IE'))\n\n print('fetch_exchange(GB, NL) ->')\n print(fetch_exchange('GB', 'NL'))\n", "path": "parsers/ELEXON.py"}]} | 3,964 | 433 |
gh_patches_debug_36930 | rasdani/github-patches | git_diff | kedro-org__kedro-1761 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve Jupyter workflow when configuration is broken
## Description
> Is your feature request related to a problem? A clear and concise description of what the problem is: "I'm always frustrated when ..."
Using the spaceflights demo on 0.18.1, if I open a `kedro jupyter notebook` session and write `catalog.list()`, I get the expected output:\r\n\r\n*(screenshot of the working `catalog.list()` output)*\r\n\r\n
If I break the `catalog.yml` file so that it is unparsable and run `kedro catalog list` (or any other command like `kedro run`), I will get a helpful parse error in the terminal: \r\n\r\n<img width=\"654\" alt=\"image\" src=\"https://user-images.githubusercontent.com/35801847/172139490-a079095e-52d7-4c80-a8d3-15c5a675239e.png\">\r\n\r\nHowever, if I start a `kedro jupyter notebook` session in this state, I get no warning or messaging that Kedro is unable to create the `catalog`, `session` and other global variables. As a user I get this confusing state where the `catalog` is no longer available to me:\r\n\r\n<img width=\"636\" alt=\"image\" src=\"https://user-images.githubusercontent.com/35801847/172139614-8dabbc13-38a4-4785-9afe-bbddf77b974a.png\">\r\n\r\n\r\n## Context\r\n> Why is this change important to you? How would you use it? 
<img width="654" alt="image" src="https://user-images.githubusercontent.com/35801847/172139490-a079095e-52d7-4c80-a8d3-15c5a675239e.png">
However, if I start a `kedro jupyter notebook` session in this state, I get no warning or messaging that Kedro is unable to create the `catalog`, `session` and other global variables. As a user I get this confusing state where the `catalog` is no longer available to me:
<img width="636" alt="image" src="https://user-images.githubusercontent.com/35801847/172139614-8dabbc13-38a4-4785-9afe-bbddf77b974a.png">
## Context
> Why is this change important to you? How would you use it? How can it benefit other users?
I think novice users can get stuck here with no direction as to why the catalog is not available.
## Possible Implementation
Surface the warning in the Jupyter context, perhaps even make dummy `catalog`, `context`, `session` objects available that expose the log message when called...
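A rough sketch of that second idea, assuming it sits next to `load_ipython_extension` in the IPython extension module; `_BrokenKedroObject` and `_push_broken_placeholders` are illustrative names, not existing Kedro API:

```python
from IPython import get_ipython


class _BrokenKedroObject:
    """Placeholder pushed instead of `catalog`/`context`/`session` on a failed startup."""

    def __init__(self, name: str, error: Exception):
        self._name = name
        self._error = error

    def __getattr__(self, item):
        if item.startswith("_"):
            # Let IPython's display machinery probe private/dunder attributes quietly.
            raise AttributeError(item)
        raise RuntimeError(
            f"'{self._name}' is unavailable because Kedro failed to start: "
            f"{self._error!r}. Fix the project configuration and run '%reload_kedro'."
        )


def _push_broken_placeholders(error: Exception) -> None:
    # Make the failure visible the moment a user touches the usual globals,
    # instead of leaving them silently undefined.
    get_ipython().push(
        variables={
            name: _BrokenKedroObject(name, error)
            for name in ("context", "catalog", "session", "pipelines")
        }
    )
```

`load_ipython_extension` could then call `_push_broken_placeholders(exc)` from its `except` branch, so that `catalog.list()` raises a self-explanatory error instead of a `NameError`.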
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/extras/extensions/ipython.py`
Content:
```
1 # pylint: disable=import-outside-toplevel,global-statement,invalid-name
2 """
3 This script creates an IPython extension to load Kedro-related variables in
4 local scope.
5 """
6 import logging
7 import sys
8 from pathlib import Path
9 from typing import Any, Dict
10
11 from IPython import get_ipython
12 from IPython.core.magic import needs_local_scope, register_line_magic
13
14 logger = logging.getLogger(__name__)
15 default_project_path = Path.cwd()
16
17
18 def _remove_cached_modules(package_name):
19 to_remove = [mod for mod in sys.modules if mod.startswith(package_name)]
20 # `del` is used instead of `reload()` because: If the new version of a module does not
21 # define a name that was defined by the old version, the old definition remains.
22 for module in to_remove:
23 del sys.modules[module] # pragma: no cover
24
25
26 def _find_kedro_project(current_dir: Path): # pragma: no cover
27 from kedro.framework.startup import _is_project
28
29 while current_dir != current_dir.parent:
30 if _is_project(current_dir):
31 return current_dir
32 current_dir = current_dir.parent
33
34 return None
35
36
37 def reload_kedro(
38 path: str = None, env: str = None, extra_params: Dict[str, Any] = None
39 ):
40 """Line magic which reloads all Kedro default variables.
41 Setting the path will also make it default for subsequent calls.
42
43
44 """
45 from kedro.framework.cli import load_entry_points
46 from kedro.framework.project import LOGGING # noqa # pylint:disable=unused-import
47 from kedro.framework.project import configure_project, pipelines
48 from kedro.framework.session import KedroSession
49 from kedro.framework.startup import bootstrap_project
50
51 # If a path is provided, set it as default for subsequent calls
52 global default_project_path
53 if path:
54 default_project_path = Path(path).expanduser().resolve()
55 logger.info("Updated path to Kedro project: %s", default_project_path)
56 else:
57 logger.info("No path argument was provided. Using: %s", default_project_path)
58
59 metadata = bootstrap_project(default_project_path)
60 _remove_cached_modules(metadata.package_name)
61 configure_project(metadata.package_name)
62
63 session = KedroSession.create(
64 metadata.package_name, default_project_path, env=env, extra_params=extra_params
65 )
66 logger.debug("Loading the context from %s", default_project_path)
67 context = session.load_context()
68 catalog = context.catalog
69
70 get_ipython().push(
71 variables={
72 "context": context,
73 "catalog": catalog,
74 "session": session,
75 "pipelines": pipelines,
76 }
77 )
78
79 logger.info("Kedro project %s", str(metadata.project_name))
80 logger.info(
81 "Defined global variable 'context', 'session', 'catalog' and 'pipelines'"
82 )
83
84 for line_magic in load_entry_points("line_magic"):
85 register_line_magic(needs_local_scope(line_magic))
86 logger.info("Registered line magic '%s'", line_magic.__name__) # type: ignore
87
88
89 def load_ipython_extension(ipython):
90 """Main entry point when %load_ext is executed"""
91
92 global default_project_path
93
94 ipython.register_magic_function(reload_kedro, "line", "reload_kedro")
95
96 default_project_path = _find_kedro_project(Path.cwd())
97
98 try:
99 reload_kedro(default_project_path)
100 except (ImportError, ModuleNotFoundError):
101 logger.error("Kedro appears not to be installed in your current environment.")
102 except Exception: # pylint: disable=broad-except
103 logger.warning(
104 "Kedro extension was registered but couldn't find a Kedro project. "
105 "Make sure you run '%reload_kedro <project_root>'."
106 )
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kedro/extras/extensions/ipython.py b/kedro/extras/extensions/ipython.py
--- a/kedro/extras/extensions/ipython.py
+++ b/kedro/extras/extensions/ipython.py
@@ -1,4 +1,4 @@
-# pylint: disable=import-outside-toplevel,global-statement,invalid-name
+# pylint: disable=import-outside-toplevel,global-statement,invalid-name,too-many-locals
"""
This script creates an IPython extension to load Kedro-related variables in
local scope.
@@ -8,9 +8,6 @@
from pathlib import Path
from typing import Any, Dict
-from IPython import get_ipython
-from IPython.core.magic import needs_local_scope, register_line_magic
-
logger = logging.getLogger(__name__)
default_project_path = Path.cwd()
@@ -39,9 +36,10 @@
):
"""Line magic which reloads all Kedro default variables.
Setting the path will also make it default for subsequent calls.
-
-
"""
+ from IPython import get_ipython
+ from IPython.core.magic import needs_local_scope, register_line_magic
+
from kedro.framework.cli import load_entry_points
from kedro.framework.project import LOGGING # noqa # pylint:disable=unused-import
from kedro.framework.project import configure_project, pipelines
@@ -63,7 +61,6 @@
session = KedroSession.create(
metadata.package_name, default_project_path, env=env, extra_params=extra_params
)
- logger.debug("Loading the context from %s", default_project_path)
context = session.load_context()
catalog = context.catalog
@@ -95,12 +92,11 @@
default_project_path = _find_kedro_project(Path.cwd())
- try:
- reload_kedro(default_project_path)
- except (ImportError, ModuleNotFoundError):
- logger.error("Kedro appears not to be installed in your current environment.")
- except Exception: # pylint: disable=broad-except
+ if default_project_path is None:
logger.warning(
"Kedro extension was registered but couldn't find a Kedro project. "
"Make sure you run '%reload_kedro <project_root>'."
)
+ return
+
+ reload_kedro(default_project_path)
| {"golden_diff": "diff --git a/kedro/extras/extensions/ipython.py b/kedro/extras/extensions/ipython.py\n--- a/kedro/extras/extensions/ipython.py\n+++ b/kedro/extras/extensions/ipython.py\n@@ -1,4 +1,4 @@\n-# pylint: disable=import-outside-toplevel,global-statement,invalid-name\n+# pylint: disable=import-outside-toplevel,global-statement,invalid-name,too-many-locals\n \"\"\"\n This script creates an IPython extension to load Kedro-related variables in\n local scope.\n@@ -8,9 +8,6 @@\n from pathlib import Path\n from typing import Any, Dict\n \n-from IPython import get_ipython\n-from IPython.core.magic import needs_local_scope, register_line_magic\n-\n logger = logging.getLogger(__name__)\n default_project_path = Path.cwd()\n \n@@ -39,9 +36,10 @@\n ):\n \"\"\"Line magic which reloads all Kedro default variables.\n Setting the path will also make it default for subsequent calls.\n-\n-\n \"\"\"\n+ from IPython import get_ipython\n+ from IPython.core.magic import needs_local_scope, register_line_magic\n+\n from kedro.framework.cli import load_entry_points\n from kedro.framework.project import LOGGING # noqa # pylint:disable=unused-import\n from kedro.framework.project import configure_project, pipelines\n@@ -63,7 +61,6 @@\n session = KedroSession.create(\n metadata.package_name, default_project_path, env=env, extra_params=extra_params\n )\n- logger.debug(\"Loading the context from %s\", default_project_path)\n context = session.load_context()\n catalog = context.catalog\n \n@@ -95,12 +92,11 @@\n \n default_project_path = _find_kedro_project(Path.cwd())\n \n- try:\n- reload_kedro(default_project_path)\n- except (ImportError, ModuleNotFoundError):\n- logger.error(\"Kedro appears not to be installed in your current environment.\")\n- except Exception: # pylint: disable=broad-except\n+ if default_project_path is None:\n logger.warning(\n \"Kedro extension was registered but couldn't find a Kedro project. \"\n \"Make sure you run '%reload_kedro <project_root>'.\"\n )\n+ return\n+\n+ reload_kedro(default_project_path)\n", "issue": "Improve Jupyter workflow when configuration is broken\n## Description\r\n> Is your feature request related to a problem? A clear and concise description of what the problem is: \"I'm always frustrated when ...\"\r\n\r\nUsing the spaceflights demo on 0.18.1 if I open a `kedro jupyter notebook` session and write `catalog.list()` I will get the following:\r\n\r\n\r\n\r\nIf I break the `catalog.yml` file so that it is unparsable and run `kedro catalog list` (or any other command like `kedro run` I will get a helpful parse error in the terminal: \r\n\r\n<img width=\"654\" alt=\"image\" src=\"https://user-images.githubusercontent.com/35801847/172139490-a079095e-52d7-4c80-a8d3-15c5a675239e.png\">\r\n\r\nHowever, if I start a `kedro jupyter notebook` session in this state, I get no warning or messaging that Kedro is unable to create the `catalog`, `session` and other global variables. As a user I get this confusing state where the `catalog` is no longer available to me:\r\n\r\n<img width=\"636\" alt=\"image\" src=\"https://user-images.githubusercontent.com/35801847/172139614-8dabbc13-38a4-4785-9afe-bbddf77b974a.png\">\r\n\r\n\r\n## Context\r\n> Why is this change important to you? How would you use it? 
How can it benefit other users?\r\n\r\nI think novice users can get stuck here with no direction as to why the catalog is not available.\r\n\r\n## Possible Implementation\r\n\r\nSurface warning to the jupyter context, perhaps even make a dummy `catalog`, `context`, `session` objects available that expose the log message when called...\n", "before_files": [{"content": "# pylint: disable=import-outside-toplevel,global-statement,invalid-name\n\"\"\"\nThis script creates an IPython extension to load Kedro-related variables in\nlocal scope.\n\"\"\"\nimport logging\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nfrom IPython import get_ipython\nfrom IPython.core.magic import needs_local_scope, register_line_magic\n\nlogger = logging.getLogger(__name__)\ndefault_project_path = Path.cwd()\n\n\ndef _remove_cached_modules(package_name):\n to_remove = [mod for mod in sys.modules if mod.startswith(package_name)]\n # `del` is used instead of `reload()` because: If the new version of a module does not\n # define a name that was defined by the old version, the old definition remains.\n for module in to_remove:\n del sys.modules[module] # pragma: no cover\n\n\ndef _find_kedro_project(current_dir: Path): # pragma: no cover\n from kedro.framework.startup import _is_project\n\n while current_dir != current_dir.parent:\n if _is_project(current_dir):\n return current_dir\n current_dir = current_dir.parent\n\n return None\n\n\ndef reload_kedro(\n path: str = None, env: str = None, extra_params: Dict[str, Any] = None\n):\n \"\"\"Line magic which reloads all Kedro default variables.\n Setting the path will also make it default for subsequent calls.\n\n\n \"\"\"\n from kedro.framework.cli import load_entry_points\n from kedro.framework.project import LOGGING # noqa # pylint:disable=unused-import\n from kedro.framework.project import configure_project, pipelines\n from kedro.framework.session import KedroSession\n from kedro.framework.startup import bootstrap_project\n\n # If a path is provided, set it as default for subsequent calls\n global default_project_path\n if path:\n default_project_path = Path(path).expanduser().resolve()\n logger.info(\"Updated path to Kedro project: %s\", default_project_path)\n else:\n logger.info(\"No path argument was provided. 
Using: %s\", default_project_path)\n\n metadata = bootstrap_project(default_project_path)\n _remove_cached_modules(metadata.package_name)\n configure_project(metadata.package_name)\n\n session = KedroSession.create(\n metadata.package_name, default_project_path, env=env, extra_params=extra_params\n )\n logger.debug(\"Loading the context from %s\", default_project_path)\n context = session.load_context()\n catalog = context.catalog\n\n get_ipython().push(\n variables={\n \"context\": context,\n \"catalog\": catalog,\n \"session\": session,\n \"pipelines\": pipelines,\n }\n )\n\n logger.info(\"Kedro project %s\", str(metadata.project_name))\n logger.info(\n \"Defined global variable 'context', 'session', 'catalog' and 'pipelines'\"\n )\n\n for line_magic in load_entry_points(\"line_magic\"):\n register_line_magic(needs_local_scope(line_magic))\n logger.info(\"Registered line magic '%s'\", line_magic.__name__) # type: ignore\n\n\ndef load_ipython_extension(ipython):\n \"\"\"Main entry point when %load_ext is executed\"\"\"\n\n global default_project_path\n\n ipython.register_magic_function(reload_kedro, \"line\", \"reload_kedro\")\n\n default_project_path = _find_kedro_project(Path.cwd())\n\n try:\n reload_kedro(default_project_path)\n except (ImportError, ModuleNotFoundError):\n logger.error(\"Kedro appears not to be installed in your current environment.\")\n except Exception: # pylint: disable=broad-except\n logger.warning(\n \"Kedro extension was registered but couldn't find a Kedro project. \"\n \"Make sure you run '%reload_kedro <project_root>'.\"\n )\n", "path": "kedro/extras/extensions/ipython.py"}], "after_files": [{"content": "# pylint: disable=import-outside-toplevel,global-statement,invalid-name,too-many-locals\n\"\"\"\nThis script creates an IPython extension to load Kedro-related variables in\nlocal scope.\n\"\"\"\nimport logging\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nlogger = logging.getLogger(__name__)\ndefault_project_path = Path.cwd()\n\n\ndef _remove_cached_modules(package_name):\n to_remove = [mod for mod in sys.modules if mod.startswith(package_name)]\n # `del` is used instead of `reload()` because: If the new version of a module does not\n # define a name that was defined by the old version, the old definition remains.\n for module in to_remove:\n del sys.modules[module] # pragma: no cover\n\n\ndef _find_kedro_project(current_dir: Path): # pragma: no cover\n from kedro.framework.startup import _is_project\n\n while current_dir != current_dir.parent:\n if _is_project(current_dir):\n return current_dir\n current_dir = current_dir.parent\n\n return None\n\n\ndef reload_kedro(\n path: str = None, env: str = None, extra_params: Dict[str, Any] = None\n):\n \"\"\"Line magic which reloads all Kedro default variables.\n Setting the path will also make it default for subsequent calls.\n \"\"\"\n from IPython import get_ipython\n from IPython.core.magic import needs_local_scope, register_line_magic\n\n from kedro.framework.cli import load_entry_points\n from kedro.framework.project import LOGGING # noqa # pylint:disable=unused-import\n from kedro.framework.project import configure_project, pipelines\n from kedro.framework.session import KedroSession\n from kedro.framework.startup import bootstrap_project\n\n # If a path is provided, set it as default for subsequent calls\n global default_project_path\n if path:\n default_project_path = Path(path).expanduser().resolve()\n logger.info(\"Updated path to Kedro project: %s\", default_project_path)\n 
else:\n logger.info(\"No path argument was provided. Using: %s\", default_project_path)\n\n metadata = bootstrap_project(default_project_path)\n _remove_cached_modules(metadata.package_name)\n configure_project(metadata.package_name)\n\n session = KedroSession.create(\n metadata.package_name, default_project_path, env=env, extra_params=extra_params\n )\n context = session.load_context()\n catalog = context.catalog\n\n get_ipython().push(\n variables={\n \"context\": context,\n \"catalog\": catalog,\n \"session\": session,\n \"pipelines\": pipelines,\n }\n )\n\n logger.info(\"Kedro project %s\", str(metadata.project_name))\n logger.info(\n \"Defined global variable 'context', 'session', 'catalog' and 'pipelines'\"\n )\n\n for line_magic in load_entry_points(\"line_magic\"):\n register_line_magic(needs_local_scope(line_magic))\n logger.info(\"Registered line magic '%s'\", line_magic.__name__) # type: ignore\n\n\ndef load_ipython_extension(ipython):\n \"\"\"Main entry point when %load_ext is executed\"\"\"\n\n global default_project_path\n\n ipython.register_magic_function(reload_kedro, \"line\", \"reload_kedro\")\n\n default_project_path = _find_kedro_project(Path.cwd())\n\n if default_project_path is None:\n logger.warning(\n \"Kedro extension was registered but couldn't find a Kedro project. \"\n \"Make sure you run '%reload_kedro <project_root>'.\"\n )\n return\n\n reload_kedro(default_project_path)\n", "path": "kedro/extras/extensions/ipython.py"}]} | 1,794 | 521 |
gh_patches_debug_10939 | rasdani/github-patches | git_diff | interlegis__sapl-2045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Next number suggested for a legal norm (norma jurídica)
When adding a legal norm of type Portaria, the form always suggests 10 as the next number, even though we have already gone past that number.
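The symptom (the suggestion stalling at 10) points to lexicographic ordering: the view `recuperar_numero_norma` in `sapl/norma/views.py` (shown below) already has to call `int(norma[0])`, which suggests `numero` is stored as text, and `order_by('numero')` then sorts `'9'` after `'123'`, so 1 gets added to the wrong "last" number. A sketch of one possible fix, comparing the numbers as integers in Python and reusing the imports already present in that module; this is an illustration of the idea, not the project's actual patch:

```python
def recuperar_numero_norma(request):
    tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])
    ano = request.GET.get('ano', '') or timezone.now().year

    numeros = NormaJuridica.objects.filter(
        tipo=tipo, ano=ano).values_list('numero', flat=True)
    # Compare as integers, not as text, so '123' beats '9'.
    ultimo = max((int(n) for n in numeros if str(n).isdigit()), default=0)

    return JsonResponse({'numero': ultimo + 1, 'ano': ano})
```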
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/norma/views.py`
Content:
```
1
2 import weasyprint
3 from django.contrib.auth.mixins import PermissionRequiredMixin
4 from django.core.exceptions import ObjectDoesNotExist
5 from django.core.urlresolvers import reverse
6 from django.http import HttpResponse, JsonResponse
7 from django.template import RequestContext, loader
8 from django.utils import timezone
9 from django.utils.translation import ugettext_lazy as _
10 from django.views.generic import TemplateView, UpdateView
11 from django.views.generic.base import RedirectView
12 from django.views.generic.edit import FormView
13 from django_filters.views import FilterView
14
15 from sapl.base.models import AppConfig
16 from sapl.compilacao.views import IntegracaoTaView
17 from sapl.crud.base import (RP_DETAIL, RP_LIST, Crud, CrudAux,
18 MasterDetailCrud, make_pagination)
19 from sapl.utils import show_results_filter_set
20
21 from .forms import (NormaFilterSet, NormaJuridicaForm,
22 NormaPesquisaSimplesForm, NormaRelacionadaForm)
23 from .models import (AssuntoNorma, NormaJuridica, NormaRelacionada,
24 TipoNormaJuridica, TipoVinculoNormaJuridica)
25
26 # LegislacaoCitadaCrud = Crud.build(LegislacaoCitada, '')
27 AssuntoNormaCrud = CrudAux.build(AssuntoNorma, 'assunto_norma_juridica',
28 list_field_names=['assunto', 'descricao'])
29
30
31 TipoNormaCrud = CrudAux.build(
32 TipoNormaJuridica, 'tipo_norma_juridica',
33 list_field_names=['sigla', 'descricao', 'equivalente_lexml'])
34 TipoVinculoNormaJuridicaCrud = CrudAux.build(
35 TipoVinculoNormaJuridica, '',
36 list_field_names=['sigla', 'descricao_ativa', 'descricao_passiva'])
37
38
39 class NormaRelacionadaCrud(MasterDetailCrud):
40 model = NormaRelacionada
41 parent_field = 'norma_principal'
42 help_topic = 'norma_juridica'
43
44 class BaseMixin(MasterDetailCrud.BaseMixin):
45 list_field_names = ['norma_relacionada', 'tipo_vinculo']
46
47 class CreateView(MasterDetailCrud.CreateView):
48 form_class = NormaRelacionadaForm
49
50 class UpdateView(MasterDetailCrud.UpdateView):
51 form_class = NormaRelacionadaForm
52
53 def get_initial(self):
54 initial = super(UpdateView, self).get_initial()
55 initial['tipo'] = self.object.norma_relacionada.tipo.id
56 initial['numero'] = self.object.norma_relacionada.numero
57 initial['ano'] = self.object.norma_relacionada.ano
58 initial['ementa'] = self.object.norma_relacionada.ementa
59 return initial
60
61 class DetailView(MasterDetailCrud.DetailView):
62
63 layout_key = 'NormaRelacionadaDetail'
64
65
66 class NormaPesquisaView(FilterView):
67 model = NormaJuridica
68 filterset_class = NormaFilterSet
69 paginate_by = 10
70
71 def get_queryset(self):
72 qs = super().get_queryset()
73
74 qs.select_related('tipo', 'materia')
75
76 return qs
77
78 def get_context_data(self, **kwargs):
79 context = super(NormaPesquisaView, self).get_context_data(**kwargs)
80
81 context['title'] = _('Pesquisar Norma Jurídica')
82
83 qr = self.request.GET.copy()
84
85 if 'page' in qr:
86 del qr['page']
87
88 paginator = context['paginator']
89 page_obj = context['page_obj']
90
91 context['page_range'] = make_pagination(
92 page_obj.number, paginator.num_pages)
93
94 context['filter_url'] = ('&' + qr.urlencode()) if len(qr) > 0 else ''
95
96 context['show_results'] = show_results_filter_set(qr)
97
98 return context
99
100
101 class NormaTaView(IntegracaoTaView):
102 model = NormaJuridica
103 model_type_foreignkey = TipoNormaJuridica
104 map_fields = {
105 'data': 'data',
106 'ementa': 'ementa',
107 'observacao': 'observacao',
108 'numero': 'numero',
109 'ano': 'ano',
110 }
111
112 map_funcs = {
113 'publicacao_func': True
114 }
115
116 def get(self, request, *args, **kwargs):
117 """
118 Para manter a app compilacao isolada das outras aplicações,
119 este get foi implementado para tratar uma prerrogativa externa
120 de usuário.
121 """
122 if AppConfig.attr('texto_articulado_norma'):
123 return IntegracaoTaView.get(self, request, *args, **kwargs)
124 else:
125 return self.get_redirect_deactivated()
126
127
128 class NormaCrud(Crud):
129 model = NormaJuridica
130 help_topic = 'norma_juridica'
131 public = [RP_LIST, RP_DETAIL]
132
133 class BaseMixin(Crud.BaseMixin):
134 list_field_names = ['tipo', 'numero', 'ano', 'ementa']
135
136 list_url = ''
137
138 @property
139 def search_url(self):
140 namespace = self.model._meta.app_config.name
141 return reverse('%s:%s' % (namespace, 'norma_pesquisa'))
142
143 class DetailView(Crud.DetailView):
144 pass
145
146 class DeleteView(Crud.DeleteView):
147
148 def get_success_url(self):
149 return self.search_url
150
151 class CreateView(Crud.CreateView):
152 form_class = NormaJuridicaForm
153
154 @property
155 def cancel_url(self):
156 return self.search_url
157
158 layout_key = 'NormaJuridicaCreate'
159
160 class ListView(Crud.ListView, RedirectView):
161
162 def get_redirect_url(self, *args, **kwargs):
163 namespace = self.model._meta.app_config.name
164 return reverse('%s:%s' % (namespace, 'norma_pesquisa'))
165
166 def get(self, request, *args, **kwargs):
167 return RedirectView.get(self, request, *args, **kwargs)
168
169 class UpdateView(Crud.UpdateView):
170 form_class = NormaJuridicaForm
171
172 layout_key = 'NormaJuridicaCreate'
173
174 def get_initial(self):
175 initial = super(UpdateView, self).get_initial()
176 norma = NormaJuridica.objects.get(id=self.kwargs['pk'])
177 if norma.materia:
178 initial['tipo_materia'] = norma.materia.tipo
179 initial['ano_materia'] = norma.materia.ano
180 initial['numero_materia'] = norma.materia.numero
181 return initial
182
183
184 def recuperar_norma(request):
185 tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])
186 numero = request.GET['numero']
187 ano = request.GET['ano']
188
189 try:
190 norma = NormaJuridica.objects.get(tipo=tipo,
191 ano=ano,
192 numero=numero)
193 response = JsonResponse({'ementa': norma.ementa,
194 'id': norma.id})
195 except ObjectDoesNotExist:
196 response = JsonResponse({'ementa': '', 'id': 0})
197
198 return response
199
200
201 def recuperar_numero_norma(request):
202 tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])
203 ano = request.GET.get('ano', '')
204
205 param = {'tipo': tipo}
206 param['ano'] = ano if ano else timezone.now().year
207
208 norma = NormaJuridica.objects.filter(**param).order_by(
209 'tipo', 'ano', 'numero').values_list('numero', 'ano').last()
210 if norma:
211 response = JsonResponse({'numero': int(norma[0]) + 1,
212 'ano': norma[1]})
213 else:
214 response = JsonResponse(
215 {'numero': 1, 'ano': ano})
216
217 return response
218
219
220 class ImpressosView(PermissionRequiredMixin, TemplateView):
221 template_name = 'materia/impressos/impressos.html'
222 permission_required = ('materia.can_access_impressos', )
223
224
225 def gerar_pdf_impressos(request, context, template_name):
226 template = loader.get_template(template_name)
227 html = template.render(RequestContext(request, context))
228 pdf = weasyprint.HTML(string=html, base_url=request.build_absolute_uri()
229 ).write_pdf()
230
231 response = HttpResponse(pdf, content_type='application/pdf')
232 response['Content-Disposition'] = (
233 'inline; filename="relatorio_impressos.pdf"')
234 response['Content-Transfer-Encoding'] = 'binary'
235
236 return response
237
238
239 class NormaPesquisaSimplesView(PermissionRequiredMixin, FormView):
240 form_class = NormaPesquisaSimplesForm
241 template_name = 'materia/impressos/norma.html'
242 permission_required = ('materia.can_access_impressos', )
243
244 def form_valid(self, form):
245 normas = NormaJuridica.objects.all().order_by(
246 'numero')
247 template_norma = 'materia/impressos/normas_pdf.html'
248
249 titulo = form.cleaned_data['titulo']
250
251 if form.cleaned_data['tipo_norma']:
252 normas = normas.filter(tipo=form.cleaned_data['tipo_norma'])
253
254 if form.cleaned_data['data_inicial']:
255 normas = normas.filter(
256 data__gte=form.cleaned_data['data_inicial'],
257 data__lte=form.cleaned_data['data_final'])
258
259 qtd_resultados = len(normas)
260 if qtd_resultados > 2000:
261 normas = normas[:2000]
262
263 context = {'quantidade': qtd_resultados,
264 'titulo': titulo,
265 'normas': normas}
266
267 return gerar_pdf_impressos(self.request, context, template_norma)
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sapl/norma/views.py b/sapl/norma/views.py
--- a/sapl/norma/views.py
+++ b/sapl/norma/views.py
@@ -204,9 +204,9 @@
param = {'tipo': tipo}
param['ano'] = ano if ano else timezone.now().year
-
- norma = NormaJuridica.objects.filter(**param).order_by(
- 'tipo', 'ano', 'numero').values_list('numero', 'ano').last()
+ norma = NormaJuridica.objects.filter(**param).extra(
+ {'numero_id': "CAST(numero as INTEGER)"}).order_by(
+ 'tipo', 'ano','numero_id').values_list('numero', 'ano').last()
if norma:
response = JsonResponse({'numero': int(norma[0]) + 1,
'ano': norma[1]})
| {"golden_diff": "diff --git a/sapl/norma/views.py b/sapl/norma/views.py\n--- a/sapl/norma/views.py\n+++ b/sapl/norma/views.py\n@@ -204,9 +204,9 @@\n \n param = {'tipo': tipo}\n param['ano'] = ano if ano else timezone.now().year\n-\n- norma = NormaJuridica.objects.filter(**param).order_by(\n- 'tipo', 'ano', 'numero').values_list('numero', 'ano').last()\n+ norma = NormaJuridica.objects.filter(**param).extra(\n+ {'numero_id': \"CAST(numero as INTEGER)\"}).order_by(\n+ 'tipo', 'ano','numero_id').values_list('numero', 'ano').last()\n if norma:\n response = JsonResponse({'numero': int(norma[0]) + 1,\n 'ano': norma[1]})\n", "issue": "N\u00famero seguinte de norma jur\u00eddica\nAo inserir uma uma norma jur\u00eddica do tipo Portaria, oferece sempre o n\u00famero 10 como pr\u00f3ximo, embora j\u00e1 tenhamos superado tal n\u00famero.\n", "before_files": [{"content": "\nimport weasyprint\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, JsonResponse\nfrom django.template import RequestContext, loader\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import TemplateView, UpdateView\nfrom django.views.generic.base import RedirectView\nfrom django.views.generic.edit import FormView\nfrom django_filters.views import FilterView\n\nfrom sapl.base.models import AppConfig\nfrom sapl.compilacao.views import IntegracaoTaView\nfrom sapl.crud.base import (RP_DETAIL, RP_LIST, Crud, CrudAux,\n MasterDetailCrud, make_pagination)\nfrom sapl.utils import show_results_filter_set\n\nfrom .forms import (NormaFilterSet, NormaJuridicaForm,\n NormaPesquisaSimplesForm, NormaRelacionadaForm)\nfrom .models import (AssuntoNorma, NormaJuridica, NormaRelacionada,\n TipoNormaJuridica, TipoVinculoNormaJuridica)\n\n# LegislacaoCitadaCrud = Crud.build(LegislacaoCitada, '')\nAssuntoNormaCrud = CrudAux.build(AssuntoNorma, 'assunto_norma_juridica',\n list_field_names=['assunto', 'descricao'])\n\n\nTipoNormaCrud = CrudAux.build(\n TipoNormaJuridica, 'tipo_norma_juridica',\n list_field_names=['sigla', 'descricao', 'equivalente_lexml'])\nTipoVinculoNormaJuridicaCrud = CrudAux.build(\n TipoVinculoNormaJuridica, '',\n list_field_names=['sigla', 'descricao_ativa', 'descricao_passiva'])\n\n\nclass NormaRelacionadaCrud(MasterDetailCrud):\n model = NormaRelacionada\n parent_field = 'norma_principal'\n help_topic = 'norma_juridica'\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['norma_relacionada', 'tipo_vinculo']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = NormaRelacionadaForm\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = NormaRelacionadaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n initial['tipo'] = self.object.norma_relacionada.tipo.id\n initial['numero'] = self.object.norma_relacionada.numero\n initial['ano'] = self.object.norma_relacionada.ano\n initial['ementa'] = self.object.norma_relacionada.ementa\n return initial\n\n class DetailView(MasterDetailCrud.DetailView):\n\n layout_key = 'NormaRelacionadaDetail'\n\n\nclass NormaPesquisaView(FilterView):\n model = NormaJuridica\n filterset_class = NormaFilterSet\n paginate_by = 10\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n qs.select_related('tipo', 'materia')\n\n return qs\n\n def get_context_data(self, **kwargs):\n context = super(NormaPesquisaView, 
self).get_context_data(**kwargs)\n\n context['title'] = _('Pesquisar Norma Jur\u00eddica')\n\n qr = self.request.GET.copy()\n\n if 'page' in qr:\n del qr['page']\n\n paginator = context['paginator']\n page_obj = context['page_obj']\n\n context['page_range'] = make_pagination(\n page_obj.number, paginator.num_pages)\n\n context['filter_url'] = ('&' + qr.urlencode()) if len(qr) > 0 else ''\n\n context['show_results'] = show_results_filter_set(qr)\n\n return context\n\n\nclass NormaTaView(IntegracaoTaView):\n model = NormaJuridica\n model_type_foreignkey = TipoNormaJuridica\n map_fields = {\n 'data': 'data',\n 'ementa': 'ementa',\n 'observacao': 'observacao',\n 'numero': 'numero',\n 'ano': 'ano',\n }\n\n map_funcs = {\n 'publicacao_func': True\n }\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Para manter a app compilacao isolada das outras aplica\u00e7\u00f5es,\n este get foi implementado para tratar uma prerrogativa externa\n de usu\u00e1rio.\n \"\"\"\n if AppConfig.attr('texto_articulado_norma'):\n return IntegracaoTaView.get(self, request, *args, **kwargs)\n else:\n return self.get_redirect_deactivated()\n\n\nclass NormaCrud(Crud):\n model = NormaJuridica\n help_topic = 'norma_juridica'\n public = [RP_LIST, RP_DETAIL]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['tipo', 'numero', 'ano', 'ementa']\n\n list_url = ''\n\n @property\n def search_url(self):\n namespace = self.model._meta.app_config.name\n return reverse('%s:%s' % (namespace, 'norma_pesquisa'))\n\n class DetailView(Crud.DetailView):\n pass\n\n class DeleteView(Crud.DeleteView):\n\n def get_success_url(self):\n return self.search_url\n\n class CreateView(Crud.CreateView):\n form_class = NormaJuridicaForm\n\n @property\n def cancel_url(self):\n return self.search_url\n\n layout_key = 'NormaJuridicaCreate'\n\n class ListView(Crud.ListView, RedirectView):\n\n def get_redirect_url(self, *args, **kwargs):\n namespace = self.model._meta.app_config.name\n return reverse('%s:%s' % (namespace, 'norma_pesquisa'))\n\n def get(self, request, *args, **kwargs):\n return RedirectView.get(self, request, *args, **kwargs)\n\n class UpdateView(Crud.UpdateView):\n form_class = NormaJuridicaForm\n\n layout_key = 'NormaJuridicaCreate'\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n norma = NormaJuridica.objects.get(id=self.kwargs['pk'])\n if norma.materia:\n initial['tipo_materia'] = norma.materia.tipo\n initial['ano_materia'] = norma.materia.ano\n initial['numero_materia'] = norma.materia.numero\n return initial\n\n\ndef recuperar_norma(request):\n tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])\n numero = request.GET['numero']\n ano = request.GET['ano']\n\n try:\n norma = NormaJuridica.objects.get(tipo=tipo,\n ano=ano,\n numero=numero)\n response = JsonResponse({'ementa': norma.ementa,\n 'id': norma.id})\n except ObjectDoesNotExist:\n response = JsonResponse({'ementa': '', 'id': 0})\n\n return response\n\n\ndef recuperar_numero_norma(request):\n tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])\n ano = request.GET.get('ano', '')\n\n param = {'tipo': tipo}\n param['ano'] = ano if ano else timezone.now().year\n\n norma = NormaJuridica.objects.filter(**param).order_by(\n 'tipo', 'ano', 'numero').values_list('numero', 'ano').last()\n if norma:\n response = JsonResponse({'numero': int(norma[0]) + 1,\n 'ano': norma[1]})\n else:\n response = JsonResponse(\n {'numero': 1, 'ano': ano})\n\n return response\n\n\nclass ImpressosView(PermissionRequiredMixin, TemplateView):\n 
template_name = 'materia/impressos/impressos.html'\n permission_required = ('materia.can_access_impressos', )\n\n\ndef gerar_pdf_impressos(request, context, template_name):\n template = loader.get_template(template_name)\n html = template.render(RequestContext(request, context))\n pdf = weasyprint.HTML(string=html, base_url=request.build_absolute_uri()\n ).write_pdf()\n\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = (\n 'inline; filename=\"relatorio_impressos.pdf\"')\n response['Content-Transfer-Encoding'] = 'binary'\n\n return response\n\n\nclass NormaPesquisaSimplesView(PermissionRequiredMixin, FormView):\n form_class = NormaPesquisaSimplesForm\n template_name = 'materia/impressos/norma.html'\n permission_required = ('materia.can_access_impressos', )\n\n def form_valid(self, form):\n normas = NormaJuridica.objects.all().order_by(\n 'numero')\n template_norma = 'materia/impressos/normas_pdf.html'\n\n titulo = form.cleaned_data['titulo']\n\n if form.cleaned_data['tipo_norma']:\n normas = normas.filter(tipo=form.cleaned_data['tipo_norma'])\n\n if form.cleaned_data['data_inicial']:\n normas = normas.filter(\n data__gte=form.cleaned_data['data_inicial'],\n data__lte=form.cleaned_data['data_final'])\n\n qtd_resultados = len(normas)\n if qtd_resultados > 2000:\n normas = normas[:2000]\n\n context = {'quantidade': qtd_resultados,\n 'titulo': titulo,\n 'normas': normas}\n\n return gerar_pdf_impressos(self.request, context, template_norma)\n", "path": "sapl/norma/views.py"}], "after_files": [{"content": "\nimport weasyprint\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, JsonResponse\nfrom django.template import RequestContext, loader\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import TemplateView, UpdateView\nfrom django.views.generic.base import RedirectView\nfrom django.views.generic.edit import FormView\nfrom django_filters.views import FilterView\n\nfrom sapl.base.models import AppConfig\nfrom sapl.compilacao.views import IntegracaoTaView\nfrom sapl.crud.base import (RP_DETAIL, RP_LIST, Crud, CrudAux,\n MasterDetailCrud, make_pagination)\nfrom sapl.utils import show_results_filter_set\n\nfrom .forms import (NormaFilterSet, NormaJuridicaForm,\n NormaPesquisaSimplesForm, NormaRelacionadaForm)\nfrom .models import (AssuntoNorma, NormaJuridica, NormaRelacionada,\n TipoNormaJuridica, TipoVinculoNormaJuridica)\n\n# LegislacaoCitadaCrud = Crud.build(LegislacaoCitada, '')\nAssuntoNormaCrud = CrudAux.build(AssuntoNorma, 'assunto_norma_juridica',\n list_field_names=['assunto', 'descricao'])\n\n\nTipoNormaCrud = CrudAux.build(\n TipoNormaJuridica, 'tipo_norma_juridica',\n list_field_names=['sigla', 'descricao', 'equivalente_lexml'])\nTipoVinculoNormaJuridicaCrud = CrudAux.build(\n TipoVinculoNormaJuridica, '',\n list_field_names=['sigla', 'descricao_ativa', 'descricao_passiva'])\n\n\nclass NormaRelacionadaCrud(MasterDetailCrud):\n model = NormaRelacionada\n parent_field = 'norma_principal'\n help_topic = 'norma_juridica'\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['norma_relacionada', 'tipo_vinculo']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = NormaRelacionadaForm\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = NormaRelacionadaForm\n\n def get_initial(self):\n 
initial = super(UpdateView, self).get_initial()\n initial['tipo'] = self.object.norma_relacionada.tipo.id\n initial['numero'] = self.object.norma_relacionada.numero\n initial['ano'] = self.object.norma_relacionada.ano\n initial['ementa'] = self.object.norma_relacionada.ementa\n return initial\n\n class DetailView(MasterDetailCrud.DetailView):\n\n layout_key = 'NormaRelacionadaDetail'\n\n\nclass NormaPesquisaView(FilterView):\n model = NormaJuridica\n filterset_class = NormaFilterSet\n paginate_by = 10\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n qs.select_related('tipo', 'materia')\n\n return qs\n\n def get_context_data(self, **kwargs):\n context = super(NormaPesquisaView, self).get_context_data(**kwargs)\n\n context['title'] = _('Pesquisar Norma Jur\u00eddica')\n\n qr = self.request.GET.copy()\n\n if 'page' in qr:\n del qr['page']\n\n paginator = context['paginator']\n page_obj = context['page_obj']\n\n context['page_range'] = make_pagination(\n page_obj.number, paginator.num_pages)\n\n context['filter_url'] = ('&' + qr.urlencode()) if len(qr) > 0 else ''\n\n context['show_results'] = show_results_filter_set(qr)\n\n return context\n\n\nclass NormaTaView(IntegracaoTaView):\n model = NormaJuridica\n model_type_foreignkey = TipoNormaJuridica\n map_fields = {\n 'data': 'data',\n 'ementa': 'ementa',\n 'observacao': 'observacao',\n 'numero': 'numero',\n 'ano': 'ano',\n }\n\n map_funcs = {\n 'publicacao_func': True\n }\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Para manter a app compilacao isolada das outras aplica\u00e7\u00f5es,\n este get foi implementado para tratar uma prerrogativa externa\n de usu\u00e1rio.\n \"\"\"\n if AppConfig.attr('texto_articulado_norma'):\n return IntegracaoTaView.get(self, request, *args, **kwargs)\n else:\n return self.get_redirect_deactivated()\n\n\nclass NormaCrud(Crud):\n model = NormaJuridica\n help_topic = 'norma_juridica'\n public = [RP_LIST, RP_DETAIL]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['tipo', 'numero', 'ano', 'ementa']\n\n list_url = ''\n\n @property\n def search_url(self):\n namespace = self.model._meta.app_config.name\n return reverse('%s:%s' % (namespace, 'norma_pesquisa'))\n\n class DetailView(Crud.DetailView):\n pass\n\n class DeleteView(Crud.DeleteView):\n\n def get_success_url(self):\n return self.search_url\n\n class CreateView(Crud.CreateView):\n form_class = NormaJuridicaForm\n\n @property\n def cancel_url(self):\n return self.search_url\n\n layout_key = 'NormaJuridicaCreate'\n\n class ListView(Crud.ListView, RedirectView):\n\n def get_redirect_url(self, *args, **kwargs):\n namespace = self.model._meta.app_config.name\n return reverse('%s:%s' % (namespace, 'norma_pesquisa'))\n\n def get(self, request, *args, **kwargs):\n return RedirectView.get(self, request, *args, **kwargs)\n\n class UpdateView(Crud.UpdateView):\n form_class = NormaJuridicaForm\n\n layout_key = 'NormaJuridicaCreate'\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n norma = NormaJuridica.objects.get(id=self.kwargs['pk'])\n if norma.materia:\n initial['tipo_materia'] = norma.materia.tipo\n initial['ano_materia'] = norma.materia.ano\n initial['numero_materia'] = norma.materia.numero\n return initial\n\n\ndef recuperar_norma(request):\n tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])\n numero = request.GET['numero']\n ano = request.GET['ano']\n\n try:\n norma = NormaJuridica.objects.get(tipo=tipo,\n ano=ano,\n numero=numero)\n response = JsonResponse({'ementa': norma.ementa,\n 'id': 
norma.id})\n except ObjectDoesNotExist:\n response = JsonResponse({'ementa': '', 'id': 0})\n\n return response\n\n\ndef recuperar_numero_norma(request):\n tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])\n ano = request.GET.get('ano', '')\n\n param = {'tipo': tipo}\n param['ano'] = ano if ano else timezone.now().year\n norma = NormaJuridica.objects.filter(**param).extra(\n {'numero_id': \"CAST(numero as INTEGER)\"}).order_by(\n 'tipo', 'ano','numero_id').values_list('numero', 'ano').last()\n if norma:\n response = JsonResponse({'numero': int(norma[0]) + 1,\n 'ano': norma[1]})\n else:\n response = JsonResponse(\n {'numero': 1, 'ano': ano})\n\n return response\n\n\nclass ImpressosView(PermissionRequiredMixin, TemplateView):\n template_name = 'materia/impressos/impressos.html'\n permission_required = ('materia.can_access_impressos', )\n\n\ndef gerar_pdf_impressos(request, context, template_name):\n template = loader.get_template(template_name)\n html = template.render(RequestContext(request, context))\n pdf = weasyprint.HTML(string=html, base_url=request.build_absolute_uri()\n ).write_pdf()\n\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = (\n 'inline; filename=\"relatorio_impressos.pdf\"')\n response['Content-Transfer-Encoding'] = 'binary'\n\n return response\n\n\nclass NormaPesquisaSimplesView(PermissionRequiredMixin, FormView):\n form_class = NormaPesquisaSimplesForm\n template_name = 'materia/impressos/norma.html'\n permission_required = ('materia.can_access_impressos', )\n\n def form_valid(self, form):\n normas = NormaJuridica.objects.all().order_by(\n 'numero')\n template_norma = 'materia/impressos/normas_pdf.html'\n\n titulo = form.cleaned_data['titulo']\n\n if form.cleaned_data['tipo_norma']:\n normas = normas.filter(tipo=form.cleaned_data['tipo_norma'])\n\n if form.cleaned_data['data_inicial']:\n normas = normas.filter(\n data__gte=form.cleaned_data['data_inicial'],\n data__lte=form.cleaned_data['data_final'])\n\n qtd_resultados = len(normas)\n if qtd_resultados > 2000:\n normas = normas[:2000]\n\n context = {'quantidade': qtd_resultados,\n 'titulo': titulo,\n 'normas': normas}\n\n return gerar_pdf_impressos(self.request, context, template_norma)\n", "path": "sapl/norma/views.py"}]} | 3,138 | 204 |
gh_patches_debug_43422 | rasdani/github-patches | git_diff | encode__starlette-92 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `allow_origin_regex` to CORSMiddleware.
It'd be helpful if `CORSMiddleware` supported an `allow_origin_regex`, so that users could do...
```python
# Enforce a subdomain CORS policy
app.add_middleware(CORSMiddleware, allow_origin_regex="(http|https)://*.example.com")
```
Or...
```python
# Enforce an HTTPS-only CORS policy.
app.add_middleware(CORSMiddleware, allow_origin_regex="https://*")
```
The string should be compiled to a regex by the middleware and matches should be anchored to the start/end of the origin string.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/cors.py`
Content:
```
1 from starlette.datastructures import Headers, MutableHeaders, URL
2 from starlette.responses import PlainTextResponse
3 from starlette.types import ASGIApp, ASGIInstance, Scope
4 import functools
5 import typing
6
7
8 ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
9
10
11 class CORSMiddleware:
12 def __init__(
13 self,
14 app: ASGIApp,
15 allow_origins: typing.Sequence[str] = (),
16 allow_methods: typing.Sequence[str] = ("GET",),
17 allow_headers: typing.Sequence[str] = (),
18 allow_credentials: bool = False,
19 expose_headers: typing.Sequence[str] = (),
20 max_age: int = 600,
21 ):
22
23 if "*" in allow_methods:
24 allow_methods = ALL_METHODS
25
26 simple_headers = {}
27 if "*" in allow_origins:
28 simple_headers["Access-Control-Allow-Origin"] = "*"
29 if allow_credentials:
30 simple_headers["Access-Control-Allow-Credentials"] = "true"
31 if expose_headers:
32 simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers)
33
34 preflight_headers = {}
35 if "*" in allow_origins:
36 preflight_headers["Access-Control-Allow-Origin"] = "*"
37 else:
38 preflight_headers["Vary"] = "Origin"
39 preflight_headers.update(
40 {
41 "Access-Control-Allow-Methods": ", ".join(allow_methods),
42 "Access-Control-Max-Age": str(max_age),
43 }
44 )
45 if allow_headers and "*" not in allow_headers:
46 preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
47 if allow_credentials:
48 preflight_headers["Access-Control-Allow-Credentials"] = "true"
49
50 self.app = app
51 self.allow_origins = allow_origins
52 self.allow_methods = allow_methods
53 self.allow_headers = allow_headers
54 self.allow_all_origins = "*" in allow_origins
55 self.allow_all_headers = "*" in allow_headers
56 self.simple_headers = simple_headers
57 self.preflight_headers = preflight_headers
58
59 def __call__(self, scope: Scope):
60 if scope["type"] == "http":
61 method = scope["method"]
62 headers = Headers(scope["headers"])
63 origin = headers.get("origin")
64
65 if origin is not None:
66 if method == "OPTIONS" and "access-control-request-method" in headers:
67 return self.preflight_response(request_headers=headers)
68 else:
69 return functools.partial(
70 self.simple_response, scope=scope, origin=origin
71 )
72
73 return self.app(scope)
74
75 def preflight_response(self, request_headers):
76 requested_origin = request_headers["origin"]
77 requested_method = request_headers["access-control-request-method"]
78 requested_headers = request_headers.get("access-control-request-headers")
79 requested_cookie = "cookie" in request_headers
80
81 headers = dict(self.preflight_headers)
82 failures = []
83
84 # If we only allow specific origins, then we have to mirror back
85 # the Origin header in the response.
86 if not self.allow_all_origins:
87 if requested_origin in self.allow_origins:
88 headers["Access-Control-Allow-Origin"] = requested_origin
89 else:
90 failures.append("origin")
91
92 if requested_method not in self.allow_methods:
93 failures.append("method")
94
95 # If we allow all headers, then we have to mirror back any requested
96 # headers in the response.
97 if self.allow_all_headers and requested_headers is not None:
98 headers["Access-Control-Allow-Headers"] = requested_headers
99 elif requested_headers is not None:
100 for header in requested_headers.split(","):
101 if header.strip() not in self.allow_headers:
102 failures.append("headers")
103
104 # We don't strictly need to use 400 responses here, since its up to
105 # the browser to enforce the CORS policy, but its more informative
106 # if we do.
107 if failures:
108 failure_text = "Disallowed CORS " + ", ".join(failures)
109 return PlainTextResponse(failure_text, status_code=400, headers=headers)
110
111 return PlainTextResponse("OK", status_code=200, headers=headers)
112
113 async def simple_response(self, receive, send, scope=None, origin=None):
114 inner = self.app(scope)
115 send = functools.partial(self.send, send=send, origin=origin)
116 await inner(receive, send)
117
118 async def send(self, message, send=None, origin=None):
119 if message["type"] != "http.response.start":
120 await send(message)
121 return
122
123 message.setdefault("headers", [])
124 headers = MutableHeaders(message["headers"])
125
126 # If we only allow specific origins, then we have to mirror back
127 # the Origin header in the response.
128 if not self.allow_all_origins and origin in self.allow_origins:
129 headers["Access-Control-Allow-Origin"] = origin
130 headers.update(self.simple_headers)
131 await send(message)
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py
--- a/starlette/middleware/cors.py
+++ b/starlette/middleware/cors.py
@@ -3,6 +3,7 @@
from starlette.types import ASGIApp, ASGIInstance, Scope
import functools
import typing
+import re
ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
@@ -16,6 +17,7 @@
allow_methods: typing.Sequence[str] = ("GET",),
allow_headers: typing.Sequence[str] = (),
allow_credentials: bool = False,
+ allow_origin_regex: str = None,
expose_headers: typing.Sequence[str] = (),
max_age: int = 600,
):
@@ -23,6 +25,10 @@
if "*" in allow_methods:
allow_methods = ALL_METHODS
+ if allow_origin_regex is not None:
+ regex = re.compile(allow_origin_regex)
+ allow_origin_regex = regex
+
simple_headers = {}
if "*" in allow_origins:
simple_headers["Access-Control-Allow-Origin"] = "*"
@@ -53,6 +59,7 @@
self.allow_headers = allow_headers
self.allow_all_origins = "*" in allow_origins
self.allow_all_headers = "*" in allow_headers
+ self.allow_origin_regex = allow_origin_regex
self.simple_headers = simple_headers
self.preflight_headers = preflight_headers
@@ -66,12 +73,22 @@
if method == "OPTIONS" and "access-control-request-method" in headers:
return self.preflight_response(request_headers=headers)
else:
- return functools.partial(
- self.simple_response, scope=scope, origin=origin
- )
+ if self.is_allowed_origin(origin=origin):
+ return functools.partial(
+ self.simple_response, scope=scope, origin=origin
+ )
+ return PlainTextResponse("Disallowed CORS origin", status_code=400)
return self.app(scope)
+ def is_allowed_origin(self, origin):
+ if self.allow_origin_regex:
+ return self.allow_origin_regex.match(origin)
+ if self.allow_all_origins:
+ return True
+
+ return origin in self.allow_origins
+
def preflight_response(self, request_headers):
requested_origin = request_headers["origin"]
requested_method = request_headers["access-control-request-method"]
@@ -84,7 +101,7 @@
# If we only allow specific origins, then we have to mirror back
# the Origin header in the response.
if not self.allow_all_origins:
- if requested_origin in self.allow_origins:
+ if self.is_allowed_origin(origin=requested_origin):
headers["Access-Control-Allow-Origin"] = requested_origin
else:
failures.append("origin")
@@ -125,7 +142,7 @@
# If we only allow specific origins, then we have to mirror back
# the Origin header in the response.
- if not self.allow_all_origins and origin in self.allow_origins:
+ if not self.allow_all_origins and self.is_allowed_origin(origin=origin):
headers["Access-Control-Allow-Origin"] = origin
headers.update(self.simple_headers)
await send(message)
| {"golden_diff": "diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py\n--- a/starlette/middleware/cors.py\n+++ b/starlette/middleware/cors.py\n@@ -3,6 +3,7 @@\n from starlette.types import ASGIApp, ASGIInstance, Scope\n import functools\n import typing\n+import re\n \n \n ALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n@@ -16,6 +17,7 @@\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n+ allow_origin_regex: str = None,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ):\n@@ -23,6 +25,10 @@\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n \n+ if allow_origin_regex is not None:\n+ regex = re.compile(allow_origin_regex)\n+ allow_origin_regex = regex\n+\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n@@ -53,6 +59,7 @@\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n+ self.allow_origin_regex = allow_origin_regex\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n \n@@ -66,12 +73,22 @@\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n- return functools.partial(\n- self.simple_response, scope=scope, origin=origin\n- )\n+ if self.is_allowed_origin(origin=origin):\n+ return functools.partial(\n+ self.simple_response, scope=scope, origin=origin\n+ )\n+ return PlainTextResponse(\"Disallowed CORS origin\", status_code=400)\n \n return self.app(scope)\n \n+ def is_allowed_origin(self, origin):\n+ if self.allow_origin_regex:\n+ return self.allow_origin_regex.match(origin)\n+ if self.allow_all_origins:\n+ return True\n+\n+ return origin in self.allow_origins\n+\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n@@ -84,7 +101,7 @@\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins:\n- if requested_origin in self.allow_origins:\n+ if self.is_allowed_origin(origin=requested_origin):\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n@@ -125,7 +142,7 @@\n \n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n- if not self.allow_all_origins and origin in self.allow_origins:\n+ if not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "issue": "Add `allow_origin_regex` to CORSMiddleware.\nIt'd be helpful if `CORSMiddleware` supported an `allow_origin_regex`, so that users could do...\r\n\r\n```python\r\n# Enforce a subdomain CORS policy\r\napp.add_middleware(CORSMiddleware, allow_origin_regex=\"(http|https)://*.example.com\")\r\n```\r\n\r\nOr...\r\n\r\n```python\r\n# Enforce an HTTPS-only CORS policy.\r\napp.add_middleware(CORSMiddleware, allow_origin_regex=\"https://*\")\r\n```\r\n\r\nThe string should be compiled to a regex by the middleware and matches should be anchored to the start/end of the origin string.\n", "before_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses 
import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ):\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, origin=origin\n )\n\n return self.app(scope)\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins:\n if requested_origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return 
PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, origin=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, origin=origin)\n await inner(receive, send)\n\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins and origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}], "after_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\nimport re\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n allow_origin_regex: str = None,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ):\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n if allow_origin_regex is not None:\n regex = re.compile(allow_origin_regex)\n allow_origin_regex = regex\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.allow_origin_regex = allow_origin_regex\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n if self.is_allowed_origin(origin=origin):\n return functools.partial(\n self.simple_response, scope=scope, origin=origin\n )\n return PlainTextResponse(\"Disallowed CORS origin\", status_code=400)\n\n return self.app(scope)\n\n def 
is_allowed_origin(self, origin):\n if self.allow_origin_regex:\n return self.allow_origin_regex.match(origin)\n if self.allow_all_origins:\n return True\n\n return origin in self.allow_origins\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins:\n if self.is_allowed_origin(origin=requested_origin):\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, origin=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, origin=origin)\n await inner(receive, send)\n\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}]} | 1,739 | 732 |
gh_patches_debug_12814 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1178 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Run tests with warnings as errors
This is harder than it sounds, because practically everything in a Python stack emits warnings. See #1124, #1149, and #1169 for previous work on this.
One lead is to check whether we can put e.g. `,default:::setuptools,default:::pip` at the end of the [`PYTHONWARNINGS`](https://docs.python.org/3/using/cmdline.html#cmdoption-w) variable, which would exempt build-time dependencies (that we can't fix) from `=error,` at the front. Unclear from the docs whether we can use one entry per package, or if it's one per file.
Internally, we get a `ResourceWarning` from failing to close a [`branch-check` file](https://github.com/HypothesisWorks/hypothesis-python/blob/06a6ce681b8f13676ae2b674c559c413f0f3dbac/src/hypothesis/internal/coverage.py#L60) in the coverage job (fixable with a minor refactor to use `mode='a'`). There are probably more warnings that we simply don't notice at the moment.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/hypothesis/internal/coverage.py`
Content:
```
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import os
21 import sys
22 import json
23 from contextlib import contextmanager
24
25 from hypothesis.internal.reflection import proxies
26
27 """
28 This module implements a custom coverage system that records conditions and
29 then validates that every condition has been seen to be both True and False
30 during the execution of our tests.
31
32 The only thing we use it for at present is our argument validation functions,
33 where we assert that every validation function has been seen to both pass and
34 fail in the course of testing.
35
36 When not running with a magic environment variable set, this module disables
37 itself and has essentially no overhead.
38 """
39
40 pretty_file_name_cache = {}
41
42
43 def pretty_file_name(f):
44 try:
45 return pretty_file_name_cache[f]
46 except KeyError:
47 pass
48
49 parts = f.split(os.path.sep)
50 parts = parts[parts.index('hypothesis'):]
51 result = os.path.sep.join(parts)
52 pretty_file_name_cache[f] = result
53 return result
54
55
56 IN_COVERAGE_TESTS = os.getenv('HYPOTHESIS_INTERNAL_COVERAGE') == 'true'
57
58
59 if IN_COVERAGE_TESTS:
60 log = open('branch-check', 'w')
61 written = set()
62
63 def record_branch(name, value):
64 key = (name, value)
65 if key in written:
66 return
67 written.add(key)
68 log.write(
69 json.dumps({'name': name, 'value': value})
70 )
71 log.write('\n')
72 log.flush()
73
74 description_stack = []
75
76 @contextmanager
77 def check_block(name, depth):
78 # We add an extra two callers to the stack: One for the contextmanager
79 # function, one for our actual caller, so we want to go two extra
80 # stack frames up.
81 caller = sys._getframe(depth + 2)
82 local_description = '%s at %s:%d' % (
83 name,
84 pretty_file_name(caller.f_code.co_filename),
85 caller.f_lineno,
86 )
87 try:
88 description_stack.append(local_description)
89 description = ' in '.join(reversed(description_stack)) + ' passed'
90 yield
91 record_branch(description, True)
92 except BaseException:
93 record_branch(description, False)
94 raise
95 finally:
96 description_stack.pop()
97
98 @contextmanager
99 def check(name):
100 with check_block(name, 2):
101 yield
102
103 def check_function(f):
104 @proxies(f)
105 def accept(*args, **kwargs):
106 # depth of 2 because of the proxy function calling us.
107 with check_block(f.__name__, 2):
108 return f(*args, **kwargs)
109 return accept
110 else:
111 def check_function(f):
112 return f
113
114 @contextmanager
115 def check(name):
116 yield
117
118
119 class suppress_tracing(object):
120 def __enter__(self):
121 self.__original_trace = sys.gettrace()
122 sys.settrace(None)
123
124 def __exit__(self, exc_type, exc_value, traceback):
125 sys.settrace(self.__original_trace)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/hypothesis/internal/coverage.py b/src/hypothesis/internal/coverage.py
--- a/src/hypothesis/internal/coverage.py
+++ b/src/hypothesis/internal/coverage.py
@@ -57,7 +57,8 @@
if IN_COVERAGE_TESTS:
- log = open('branch-check', 'w')
+ with open('branch-check', 'w'):
+ pass
written = set()
def record_branch(name, value):
@@ -65,11 +66,8 @@
if key in written:
return
written.add(key)
- log.write(
- json.dumps({'name': name, 'value': value})
- )
- log.write('\n')
- log.flush()
+ with open('branch-check', 'a') as log:
+ log.write(json.dumps({'name': name, 'value': value}) + '\n')
description_stack = []
| {"golden_diff": "diff --git a/src/hypothesis/internal/coverage.py b/src/hypothesis/internal/coverage.py\n--- a/src/hypothesis/internal/coverage.py\n+++ b/src/hypothesis/internal/coverage.py\n@@ -57,7 +57,8 @@\n \n \n if IN_COVERAGE_TESTS:\n- log = open('branch-check', 'w')\n+ with open('branch-check', 'w'):\n+ pass\n written = set()\n \n def record_branch(name, value):\n@@ -65,11 +66,8 @@\n if key in written:\n return\n written.add(key)\n- log.write(\n- json.dumps({'name': name, 'value': value})\n- )\n- log.write('\\n')\n- log.flush()\n+ with open('branch-check', 'a') as log:\n+ log.write(json.dumps({'name': name, 'value': value}) + '\\n')\n \n description_stack = []\n", "issue": "Run tests with warnings as errors\nThis is harder than it sounds, because practically everything in a Python stack emits warnings. See #1124, #1149, and #1169 for previous work on this.\r\n\r\nOne lead is to check whether we can put e.g. `,default:::setuptools,default:::pip` at the end of the [`PYTHONWARNINGS`](https://docs.python.org/3/using/cmdline.html#cmdoption-w) variable, which would exempt build-time dependencies (that we can't fix) from `=error,` at the front. Unclear from the docs whether we can use one entry per package, or if it's one per file.\r\n\r\nInternally, we get a `ResourceWarning` from failing to close a [`branch-check` file](https://github.com/HypothesisWorks/hypothesis-python/blob/06a6ce681b8f13676ae2b674c559c413f0f3dbac/src/hypothesis/internal/coverage.py#L60) in the coverage job (fixable with a minor refactor to use `mode='a'`). There are probably more warnings that we simply don't notice at the moment.\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport json\nfrom contextlib import contextmanager\n\nfrom hypothesis.internal.reflection import proxies\n\n\"\"\"\nThis module implements a custom coverage system that records conditions and\nthen validates that every condition has been seen to be both True and False\nduring the execution of our tests.\n\nThe only thing we use it for at present is our argument validation functions,\nwhere we assert that every validation function has been seen to both pass and\nfail in the course of testing.\n\nWhen not running with a magic environment variable set, this module disables\nitself and has essentially no overhead.\n\"\"\"\n\npretty_file_name_cache = {}\n\n\ndef pretty_file_name(f):\n try:\n return pretty_file_name_cache[f]\n except KeyError:\n pass\n\n parts = f.split(os.path.sep)\n parts = parts[parts.index('hypothesis'):]\n result = os.path.sep.join(parts)\n pretty_file_name_cache[f] = result\n return result\n\n\nIN_COVERAGE_TESTS = os.getenv('HYPOTHESIS_INTERNAL_COVERAGE') == 'true'\n\n\nif IN_COVERAGE_TESTS:\n log = open('branch-check', 'w')\n written = set()\n\n def record_branch(name, value):\n key = (name, value)\n if key in written:\n return\n written.add(key)\n log.write(\n json.dumps({'name': name, 'value': value})\n )\n log.write('\\n')\n log.flush()\n\n description_stack = []\n\n @contextmanager\n def check_block(name, depth):\n # We add an extra two callers to the stack: One for the contextmanager\n # function, one for our actual caller, so we want to go two extra\n # stack frames up.\n caller = sys._getframe(depth + 2)\n local_description = '%s at %s:%d' % (\n name,\n pretty_file_name(caller.f_code.co_filename),\n caller.f_lineno,\n )\n try:\n description_stack.append(local_description)\n description = ' in '.join(reversed(description_stack)) + ' passed'\n yield\n record_branch(description, True)\n except BaseException:\n record_branch(description, False)\n raise\n finally:\n description_stack.pop()\n\n @contextmanager\n def check(name):\n with check_block(name, 2):\n yield\n\n def check_function(f):\n @proxies(f)\n def accept(*args, **kwargs):\n # depth of 2 because of the proxy function calling us.\n with check_block(f.__name__, 2):\n return f(*args, **kwargs)\n return accept\nelse:\n def check_function(f):\n return f\n\n @contextmanager\n def check(name):\n yield\n\n\nclass suppress_tracing(object):\n def __enter__(self):\n self.__original_trace = sys.gettrace()\n sys.settrace(None)\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.settrace(self.__original_trace)\n", "path": "src/hypothesis/internal/coverage.py"}], "after_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport json\nfrom contextlib import contextmanager\n\nfrom hypothesis.internal.reflection import proxies\n\n\"\"\"\nThis module implements a custom coverage system that records conditions and\nthen validates that every condition has been seen to be both True and False\nduring the execution of our tests.\n\nThe only thing we use it for at present is our argument validation functions,\nwhere we assert that every validation function has been seen to both pass and\nfail in the course of testing.\n\nWhen not running with a magic environment variable set, this module disables\nitself and has essentially no overhead.\n\"\"\"\n\npretty_file_name_cache = {}\n\n\ndef pretty_file_name(f):\n try:\n return pretty_file_name_cache[f]\n except KeyError:\n pass\n\n parts = f.split(os.path.sep)\n parts = parts[parts.index('hypothesis'):]\n result = os.path.sep.join(parts)\n pretty_file_name_cache[f] = result\n return result\n\n\nIN_COVERAGE_TESTS = os.getenv('HYPOTHESIS_INTERNAL_COVERAGE') == 'true'\n\n\nif IN_COVERAGE_TESTS:\n with open('branch-check', 'w'):\n pass\n written = set()\n\n def record_branch(name, value):\n key = (name, value)\n if key in written:\n return\n written.add(key)\n with open('branch-check', 'a') as log:\n log.write(json.dumps({'name': name, 'value': value}) + '\\n')\n\n description_stack = []\n\n @contextmanager\n def check_block(name, depth):\n # We add an extra two callers to the stack: One for the contextmanager\n # function, one for our actual caller, so we want to go two extra\n # stack frames up.\n caller = sys._getframe(depth + 2)\n local_description = '%s at %s:%d' % (\n name,\n pretty_file_name(caller.f_code.co_filename),\n caller.f_lineno,\n )\n try:\n description_stack.append(local_description)\n description = ' in '.join(reversed(description_stack)) + ' passed'\n yield\n record_branch(description, True)\n except BaseException:\n record_branch(description, False)\n raise\n finally:\n description_stack.pop()\n\n @contextmanager\n def check(name):\n with check_block(name, 2):\n yield\n\n def check_function(f):\n @proxies(f)\n def accept(*args, **kwargs):\n # depth of 2 because of the proxy function calling us.\n with check_block(f.__name__, 2):\n return f(*args, **kwargs)\n return accept\nelse:\n def check_function(f):\n return f\n\n @contextmanager\n def check(name):\n yield\n\n\nclass suppress_tracing(object):\n def __enter__(self):\n self.__original_trace = sys.gettrace()\n sys.settrace(None)\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.settrace(self.__original_trace)\n", "path": "src/hypothesis/internal/coverage.py"}]} | 1,634 | 205 |
gh_patches_debug_18261 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4296 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing template translations confirm-email
**URL:** https://meinberlin-dev.liqd.net/accounts/confirm-email/
**user:** user who want to register
**expected behaviour:**
Translation is shown https://github.com/liqd/a4-meinberlin/blob/80ac3005df15322a330c2ce98833a11fb35d8bc8/locale/de_DE/LC_MESSAGES/django.po#L2180
From mail-override text https://github.com/liqd/a4-meinberlin/blob/80ac3005df15322a330c2ce98833a11fb35d8bc8/meinberlin/apps/contrib/django_standard_messages.py#L13
**behaviour:** django-allauth text is used and not translated https://github.com/pennersr/django-allauth/blob/672507e517eb762f76afae0ec3670d96c6afc143/allauth/templates/account/verified_email_required.html#L16
**important screensize:**
**device & browser:**
**Comment/Question:**
The same is true for https://meinberlin-dev.liqd.net/accounts/password/reset/done/ which also shows the django-allauth text https://github.com/pennersr/django-allauth/blob/672507e517eb762f76afae0ec3670d96c6afc143/allauth/templates/account/password_reset_done.html#L15
But this is the same issue, as override text and translation are also actually in our repo.
Screenshot?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/contrib/django_standard_messages.py`
Content:
```
1 def _(s):
2 return s
3
4
5 django_standard_messages_to_override = [
6 _("You have signed out."),
7 _("Verify Your E-mail Address"),
8 _("You must type the same password each time."),
9 _("You have confirmed %(email)s."),
10 _("You cannot remove your primary e-mail address (%(email)s)."),
11 _("We have sent you an e-mail. Please contact us if "
12 "you do not receive it within a few minutes."),
13 _("We have sent an e-mail to you for verification. "
14 "Follow the link provided to finalize the signup process. "
15 "Please contact us if you do not receive it within a few minutes."),
16 _(u'You must select a minimum of %(limit_value)d choices.'),
17 _(u'You must select a maximum of %(limit_value)d choices.'),
18 _('Enter a valid email address.')
19 ]
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/contrib/django_standard_messages.py b/meinberlin/apps/contrib/django_standard_messages.py
--- a/meinberlin/apps/contrib/django_standard_messages.py
+++ b/meinberlin/apps/contrib/django_standard_messages.py
@@ -12,7 +12,13 @@
"you do not receive it within a few minutes."),
_("We have sent an e-mail to you for verification. "
"Follow the link provided to finalize the signup process. "
- "Please contact us if you do not receive it within a few minutes."),
+ "If you do not see the verification e-mail in your main inbox, "
+ "check your spam folder. "
+ "Please contact us if you do not receive the verification e-mail "
+ "within a few minutes."),
+ _("We have sent you an e-mail. If you have not received it "
+ "please check your spam folder. Otherwise contact us if you "
+ "do not receive it in a few minutes."),
_(u'You must select a minimum of %(limit_value)d choices.'),
_(u'You must select a maximum of %(limit_value)d choices.'),
_('Enter a valid email address.')
| {"golden_diff": "diff --git a/meinberlin/apps/contrib/django_standard_messages.py b/meinberlin/apps/contrib/django_standard_messages.py\n--- a/meinberlin/apps/contrib/django_standard_messages.py\n+++ b/meinberlin/apps/contrib/django_standard_messages.py\n@@ -12,7 +12,13 @@\n \"you do not receive it within a few minutes.\"),\n _(\"We have sent an e-mail to you for verification. \"\n \"Follow the link provided to finalize the signup process. \"\n- \"Please contact us if you do not receive it within a few minutes.\"),\n+ \"If you do not see the verification e-mail in your main inbox, \"\n+ \"check your spam folder. \"\n+ \"Please contact us if you do not receive the verification e-mail \"\n+ \"within a few minutes.\"),\n+ _(\"We have sent you an e-mail. If you have not received it \"\n+ \"please check your spam folder. Otherwise contact us if you \"\n+ \"do not receive it in a few minutes.\"),\n _(u'You must select a minimum of %(limit_value)d choices.'),\n _(u'You must select a maximum of %(limit_value)d choices.'),\n _('Enter a valid email address.')\n", "issue": "Missing template translations confirm-email\n**URL:** https://meinberlin-dev.liqd.net/accounts/confirm-email/\r\n**user:** user who want to register\r\n**expected behaviour:** \r\nTranslation is shown https://github.com/liqd/a4-meinberlin/blob/80ac3005df15322a330c2ce98833a11fb35d8bc8/locale/de_DE/LC_MESSAGES/django.po#L2180\r\nFrom mail-override text https://github.com/liqd/a4-meinberlin/blob/80ac3005df15322a330c2ce98833a11fb35d8bc8/meinberlin/apps/contrib/django_standard_messages.py#L13\r\n**behaviour:** django-allauth text is used and not translated https://github.com/pennersr/django-allauth/blob/672507e517eb762f76afae0ec3670d96c6afc143/allauth/templates/account/verified_email_required.html#L16\r\n**important screensize:**\r\n**device & browser:** \r\n\r\n**Comment/Question:** \r\nThe same is true for https://meinberlin-dev.liqd.net/accounts/password/reset/done/ which also shows the django-allauth text https://github.com/pennersr/django-allauth/blob/672507e517eb762f76afae0ec3670d96c6afc143/allauth/templates/account/password_reset_done.html#L15\r\nBut this is the same issue, as override text and translation are also actually in our repo. \r\n\r\nScreenshot?\r\n\r\n\r\n\n", "before_files": [{"content": "def _(s):\n return s\n\n\ndjango_standard_messages_to_override = [\n _(\"You have signed out.\"),\n _(\"Verify Your E-mail Address\"),\n _(\"You must type the same password each time.\"),\n _(\"You have confirmed %(email)s.\"),\n _(\"You cannot remove your primary e-mail address (%(email)s).\"),\n _(\"We have sent you an e-mail. Please contact us if \"\n \"you do not receive it within a few minutes.\"),\n _(\"We have sent an e-mail to you for verification. \"\n \"Follow the link provided to finalize the signup process. \"\n \"Please contact us if you do not receive it within a few minutes.\"),\n _(u'You must select a minimum of %(limit_value)d choices.'),\n _(u'You must select a maximum of %(limit_value)d choices.'),\n _('Enter a valid email address.')\n]\n", "path": "meinberlin/apps/contrib/django_standard_messages.py"}], "after_files": [{"content": "def _(s):\n return s\n\n\ndjango_standard_messages_to_override = [\n _(\"You have signed out.\"),\n _(\"Verify Your E-mail Address\"),\n _(\"You must type the same password each time.\"),\n _(\"You have confirmed %(email)s.\"),\n _(\"You cannot remove your primary e-mail address (%(email)s).\"),\n _(\"We have sent you an e-mail. 
Please contact us if \"\n \"you do not receive it within a few minutes.\"),\n _(\"We have sent an e-mail to you for verification. \"\n \"Follow the link provided to finalize the signup process. \"\n \"If you do not see the verification e-mail in your main inbox, \"\n \"check your spam folder. \"\n \"Please contact us if you do not receive the verification e-mail \"\n \"within a few minutes.\"),\n _(\"We have sent you an e-mail. If you have not received it \"\n \"please check your spam folder. Otherwise contact us if you \"\n \"do not receive it in a few minutes.\"),\n _(u'You must select a minimum of %(limit_value)d choices.'),\n _(u'You must select a maximum of %(limit_value)d choices.'),\n _('Enter a valid email address.')\n]\n", "path": "meinberlin/apps/contrib/django_standard_messages.py"}]} | 926 | 267 |
gh_patches_debug_9637 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2029 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`check-useless-excludes` meta hook doesn't seem to work well with broken symlinks
Repro steps
1. Add a submodule
2. Add a symlink to somewhere inside this submodule
3. Use `check-symlinks`
4. Deinit submodule (to simulate the case when people clone without `--recursive`)
4. Notice that it now complains about the symlink created on 2
5. Add an exclude entry with the path to that symlink
6. Use `check-useless-excludes`
7. Notice that it complains that the added exclude entry doesn't apply.
For a working example, check out https://github.com/pkoch/test-check-useless-excludes (without `--recursive`, or run `git submodule deinit --all`) and run `pre-commit run --all`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/meta_hooks/check_useless_excludes.py`
Content:
```
1 import argparse
2 import re
3 from typing import Optional
4 from typing import Sequence
5
6 from cfgv import apply_defaults
7
8 import pre_commit.constants as C
9 from pre_commit import git
10 from pre_commit.clientlib import load_config
11 from pre_commit.clientlib import MANIFEST_HOOK_DICT
12 from pre_commit.commands.run import Classifier
13
14
15 def exclude_matches_any(
16 filenames: Sequence[str],
17 include: str,
18 exclude: str,
19 ) -> bool:
20 if exclude == '^$':
21 return True
22 include_re, exclude_re = re.compile(include), re.compile(exclude)
23 for filename in filenames:
24 if include_re.search(filename) and exclude_re.search(filename):
25 return True
26 return False
27
28
29 def check_useless_excludes(config_file: str) -> int:
30 config = load_config(config_file)
31 filenames = git.get_all_files()
32 classifier = Classifier.from_config(
33 filenames, config['files'], config['exclude'],
34 )
35 retv = 0
36
37 exclude = config['exclude']
38 if not exclude_matches_any(filenames, '', exclude):
39 print(
40 f'The global exclude pattern {exclude!r} does not match any files',
41 )
42 retv = 1
43
44 for repo in config['repos']:
45 for hook in repo['hooks']:
46 # Not actually a manifest dict, but this more accurately reflects
47 # the defaults applied during runtime
48 hook = apply_defaults(hook, MANIFEST_HOOK_DICT)
49 names = classifier.filenames
50 types = hook['types']
51 types_or = hook['types_or']
52 exclude_types = hook['exclude_types']
53 names = classifier.by_types(names, types, types_or, exclude_types)
54 include, exclude = hook['files'], hook['exclude']
55 if not exclude_matches_any(names, include, exclude):
56 print(
57 f'The exclude pattern {exclude!r} for {hook["id"]} does '
58 f'not match any files',
59 )
60 retv = 1
61
62 return retv
63
64
65 def main(argv: Optional[Sequence[str]] = None) -> int:
66 parser = argparse.ArgumentParser()
67 parser.add_argument('filenames', nargs='*', default=[C.CONFIG_FILE])
68 args = parser.parse_args(argv)
69
70 retv = 0
71 for filename in args.filenames:
72 retv |= check_useless_excludes(filename)
73 return retv
74
75
76 if __name__ == '__main__':
77 exit(main())
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/meta_hooks/check_useless_excludes.py b/pre_commit/meta_hooks/check_useless_excludes.py
--- a/pre_commit/meta_hooks/check_useless_excludes.py
+++ b/pre_commit/meta_hooks/check_useless_excludes.py
@@ -43,6 +43,9 @@
for repo in config['repos']:
for hook in repo['hooks']:
+ # the default of manifest hooks is `types: [file]` but we may
+ # be configuring a symlink hook while there's a broken symlink
+ hook.setdefault('types', [])
# Not actually a manifest dict, but this more accurately reflects
# the defaults applied during runtime
hook = apply_defaults(hook, MANIFEST_HOOK_DICT)
| {"golden_diff": "diff --git a/pre_commit/meta_hooks/check_useless_excludes.py b/pre_commit/meta_hooks/check_useless_excludes.py\n--- a/pre_commit/meta_hooks/check_useless_excludes.py\n+++ b/pre_commit/meta_hooks/check_useless_excludes.py\n@@ -43,6 +43,9 @@\n \n for repo in config['repos']:\n for hook in repo['hooks']:\n+ # the default of manifest hooks is `types: [file]` but we may\n+ # be configuring a symlink hook while there's a broken symlink\n+ hook.setdefault('types', [])\n # Not actually a manifest dict, but this more accurately reflects\n # the defaults applied during runtime\n hook = apply_defaults(hook, MANIFEST_HOOK_DICT)\n", "issue": "`check-useless-excludes` meta hook doesn't seem to work well with broken symlinks\nRepro steps\r\n1. Add a submodule\r\n2. Add a symlink to somewhere inside this submodule\r\n3. Use `check-symlinks`\r\n4. Deinit submodule (to simulate the case when people clone without `--recursive`)\r\n4. Notice that it now complains about the symlink created on 2\r\n5. Add an exclude entry with the path to that symlink\r\n6. Use `check-useless-excludes`\r\n7. Notice that it complains that the added exclude entry doesn't apply.\r\n\r\nFor a working example, check out https://github.com/pkoch/test-check-useless-excludes (without `--recursive`, or run `git submodule deinit --all`) and run `pre-commit run --all`.\n", "before_files": [{"content": "import argparse\nimport re\nfrom typing import Optional\nfrom typing import Sequence\n\nfrom cfgv import apply_defaults\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import MANIFEST_HOOK_DICT\nfrom pre_commit.commands.run import Classifier\n\n\ndef exclude_matches_any(\n filenames: Sequence[str],\n include: str,\n exclude: str,\n) -> bool:\n if exclude == '^$':\n return True\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n for filename in filenames:\n if include_re.search(filename) and exclude_re.search(filename):\n return True\n return False\n\n\ndef check_useless_excludes(config_file: str) -> int:\n config = load_config(config_file)\n filenames = git.get_all_files()\n classifier = Classifier.from_config(\n filenames, config['files'], config['exclude'],\n )\n retv = 0\n\n exclude = config['exclude']\n if not exclude_matches_any(filenames, '', exclude):\n print(\n f'The global exclude pattern {exclude!r} does not match any files',\n )\n retv = 1\n\n for repo in config['repos']:\n for hook in repo['hooks']:\n # Not actually a manifest dict, but this more accurately reflects\n # the defaults applied during runtime\n hook = apply_defaults(hook, MANIFEST_HOOK_DICT)\n names = classifier.filenames\n types = hook['types']\n types_or = hook['types_or']\n exclude_types = hook['exclude_types']\n names = classifier.by_types(names, types, types_or, exclude_types)\n include, exclude = hook['files'], hook['exclude']\n if not exclude_matches_any(names, include, exclude):\n print(\n f'The exclude pattern {exclude!r} for {hook[\"id\"]} does '\n f'not match any files',\n )\n retv = 1\n\n return retv\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('filenames', nargs='*', default=[C.CONFIG_FILE])\n args = parser.parse_args(argv)\n\n retv = 0\n for filename in args.filenames:\n retv |= check_useless_excludes(filename)\n return retv\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/meta_hooks/check_useless_excludes.py"}], "after_files": 
[{"content": "import argparse\nimport re\nfrom typing import Optional\nfrom typing import Sequence\n\nfrom cfgv import apply_defaults\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import MANIFEST_HOOK_DICT\nfrom pre_commit.commands.run import Classifier\n\n\ndef exclude_matches_any(\n filenames: Sequence[str],\n include: str,\n exclude: str,\n) -> bool:\n if exclude == '^$':\n return True\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n for filename in filenames:\n if include_re.search(filename) and exclude_re.search(filename):\n return True\n return False\n\n\ndef check_useless_excludes(config_file: str) -> int:\n config = load_config(config_file)\n filenames = git.get_all_files()\n classifier = Classifier.from_config(\n filenames, config['files'], config['exclude'],\n )\n retv = 0\n\n exclude = config['exclude']\n if not exclude_matches_any(filenames, '', exclude):\n print(\n f'The global exclude pattern {exclude!r} does not match any files',\n )\n retv = 1\n\n for repo in config['repos']:\n for hook in repo['hooks']:\n # the default of manifest hooks is `types: [file]` but we may\n # be configuring a symlink hook while there's a broken symlink\n hook.setdefault('types', [])\n # Not actually a manifest dict, but this more accurately reflects\n # the defaults applied during runtime\n hook = apply_defaults(hook, MANIFEST_HOOK_DICT)\n names = classifier.filenames\n types = hook['types']\n types_or = hook['types_or']\n exclude_types = hook['exclude_types']\n names = classifier.by_types(names, types, types_or, exclude_types)\n include, exclude = hook['files'], hook['exclude']\n if not exclude_matches_any(names, include, exclude):\n print(\n f'The exclude pattern {exclude!r} for {hook[\"id\"]} does '\n f'not match any files',\n )\n retv = 1\n\n return retv\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('filenames', nargs='*', default=[C.CONFIG_FILE])\n args = parser.parse_args(argv)\n\n retv = 0\n for filename in args.filenames:\n retv |= check_useless_excludes(filename)\n return retv\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/meta_hooks/check_useless_excludes.py"}]} | 1,100 | 159 |
gh_patches_debug_37578 | rasdani/github-patches | git_diff | zulip__zulip-15713 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scrollToHash can raise an exception if the browser does not support it
If the browser is Safari (or Google's webcrawler) and the page was loaded with an anchor of the form `#:~:text=something`, then calling `scrollToHash` will raise `Error: Syntax error, unrecognized expression` here:
https://github.com/zulip/zulip/blob/1676d0b638d91680badc474190a08a52c8dc7a36/web/src/portico/help.js#L112
We should catch it and do nothing, in that case.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/views/documentation.py`
Content:
```
1 import os
2 import random
3 import re
4 from collections import OrderedDict
5 from dataclasses import dataclass
6 from typing import Any, Dict, Optional
7
8 from django.conf import settings
9 from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
10 from django.template import loader
11 from django.views.generic import TemplateView
12
13 from zerver.context_processors import zulip_default_context
14 from zerver.decorator import add_google_analytics_context
15 from zerver.lib.integrations import (
16 CATEGORIES,
17 INTEGRATIONS,
18 META_CATEGORY,
19 HubotIntegration,
20 WebhookIntegration,
21 )
22 from zerver.lib.request import REQ, RequestNotes, has_request_variables
23 from zerver.lib.subdomains import get_subdomain
24 from zerver.lib.templates import render_markdown_path
25 from zerver.models import Realm
26 from zerver.openapi.openapi import get_endpoint_from_operationid, get_openapi_summary
27
28
29 @dataclass
30 class DocumentationArticle:
31 article_path: str
32 article_http_status: int
33 endpoint_path: Optional[str]
34 endpoint_method: Optional[str]
35
36
37 def add_api_url_context(context: Dict[str, Any], request: HttpRequest) -> None:
38 context.update(zulip_default_context(request))
39
40 subdomain = get_subdomain(request)
41 if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN or not settings.ROOT_DOMAIN_LANDING_PAGE:
42 display_subdomain = subdomain
43 html_settings_links = True
44 else:
45 display_subdomain = "yourZulipDomain"
46 html_settings_links = False
47
48 display_host = Realm.host_for_subdomain(display_subdomain)
49 api_url_scheme_relative = display_host + "/api"
50 api_url = settings.EXTERNAL_URI_SCHEME + api_url_scheme_relative
51 zulip_url = settings.EXTERNAL_URI_SCHEME + display_host
52
53 context["external_url_scheme"] = settings.EXTERNAL_URI_SCHEME
54 context["api_url"] = api_url
55 context["api_url_scheme_relative"] = api_url_scheme_relative
56 context["zulip_url"] = zulip_url
57
58 context["html_settings_links"] = html_settings_links
59
60
61 class ApiURLView(TemplateView):
62 def get_context_data(self, **kwargs: Any) -> Dict[str, str]:
63 context = super().get_context_data(**kwargs)
64 add_api_url_context(context, self.request)
65 return context
66
67
68 class MarkdownDirectoryView(ApiURLView):
69 path_template = ""
70 policies_view = False
71 help_view = False
72 api_doc_view = False
73
74 def get_path(self, article: str) -> DocumentationArticle:
75 http_status = 200
76 if article == "":
77 article = "index"
78 elif article == "include/sidebar_index":
79 pass
80 elif article == "api-doc-template":
81 # This markdown template shouldn't be accessed directly.
82 article = "missing"
83 http_status = 404
84 elif "/" in article:
85 article = "missing"
86 http_status = 404
87 elif len(article) > 100 or not re.match("^[0-9a-zA-Z_-]+$", article):
88 article = "missing"
89 http_status = 404
90
91 path = self.path_template % (article,)
92 endpoint_name = None
93 endpoint_method = None
94
95 if not self.path_template.startswith("/"):
96 # Relative paths only used for policies documentation
97 # when it is not configured or in the dev environment
98 assert self.policies_view
99
100 try:
101 loader.get_template(path)
102 return DocumentationArticle(
103 article_path=path,
104 article_http_status=http_status,
105 endpoint_path=endpoint_name,
106 endpoint_method=endpoint_method,
107 )
108 except loader.TemplateDoesNotExist:
109 return DocumentationArticle(
110 article_path=self.path_template % ("missing",),
111 article_http_status=404,
112 endpoint_path=None,
113 endpoint_method=None,
114 )
115
116 if not os.path.exists(path):
117 if self.api_doc_view:
118 try:
119 # API endpoints documented in zerver/openapi/zulip.yaml
120 endpoint_name, endpoint_method = get_endpoint_from_operationid(article)
121 path = self.path_template % ("api-doc-template",)
122 except AssertionError:
123 return DocumentationArticle(
124 article_path=self.path_template % ("missing",),
125 article_http_status=404,
126 endpoint_path=None,
127 endpoint_method=None,
128 )
129 elif self.help_view or self.policies_view:
130 article = "missing"
131 http_status = 404
132 path = self.path_template % (article,)
133 else:
134 raise AssertionError("Invalid documentation view type")
135
136 return DocumentationArticle(
137 article_path=path,
138 article_http_status=http_status,
139 endpoint_path=endpoint_name,
140 endpoint_method=endpoint_method,
141 )
142
143 def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
144 article = kwargs["article"]
145 context: Dict[str, Any] = super().get_context_data()
146
147 documentation_article = self.get_path(article)
148 context["article"] = documentation_article.article_path
149 not_index_page = not context["article"].endswith("/index.md")
150
151 if documentation_article.article_path.startswith("/") and os.path.exists(
152 documentation_article.article_path
153 ):
154 # Absolute path case
155 article_absolute_path = documentation_article.article_path
156 else:
157 # Relative path case
158 article_absolute_path = os.path.join(
159 settings.DEPLOY_ROOT, "templates", documentation_article.article_path
160 )
161
162 if self.help_view:
163 context["page_is_help_center"] = True
164 context["doc_root"] = "/help/"
165 context["doc_root_title"] = "Help center"
166 sidebar_article = self.get_path("include/sidebar_index")
167 sidebar_index = sidebar_article.article_path
168 title_base = "Zulip help center"
169 elif self.policies_view:
170 context["page_is_policy_center"] = True
171 context["doc_root"] = "/policies/"
172 context["doc_root_title"] = "Terms and policies"
173 sidebar_article = self.get_path("sidebar_index")
174 sidebar_index = sidebar_article.article_path
175 title_base = "Zulip terms and policies"
176 elif self.api_doc_view:
177 context["page_is_api_center"] = True
178 context["doc_root"] = "/api/"
179 context["doc_root_title"] = "API documentation"
180 sidebar_article = self.get_path("sidebar_index")
181 sidebar_index = sidebar_article.article_path
182 title_base = "Zulip API documentation"
183 else:
184 raise AssertionError("Invalid documentation view type")
185
186 # The following is a somewhat hacky approach to extract titles from articles.
187 endpoint_name = None
188 endpoint_method = None
189 if os.path.exists(article_absolute_path):
190 with open(article_absolute_path) as article_file:
191 first_line = article_file.readlines()[0]
192 if self.api_doc_view and context["article"].endswith("api-doc-template.md"):
193 endpoint_name, endpoint_method = (
194 documentation_article.endpoint_path,
195 documentation_article.endpoint_method,
196 )
197 assert endpoint_name is not None
198 assert endpoint_method is not None
199 article_title = get_openapi_summary(endpoint_name, endpoint_method)
200 elif self.api_doc_view and "{generate_api_header(" in first_line:
201 api_operation = context["PAGE_METADATA_URL"].split("/api/")[1]
202 endpoint_name, endpoint_method = get_endpoint_from_operationid(api_operation)
203 article_title = get_openapi_summary(endpoint_name, endpoint_method)
204 else:
205 # Strip the header and then use the first line to get the article title
206 article_title = first_line.lstrip("#").strip()
207 endpoint_name = endpoint_method = None
208 if not_index_page:
209 context["PAGE_TITLE"] = f"{article_title} | {title_base}"
210 else:
211 context["PAGE_TITLE"] = title_base
212 request_notes = RequestNotes.get_notes(self.request)
213 request_notes.placeholder_open_graph_description = (
214 f"REPLACEMENT_PAGE_DESCRIPTION_{int(2**24 * random.random())}"
215 )
216 context["PAGE_DESCRIPTION"] = request_notes.placeholder_open_graph_description
217
218 context["sidebar_index"] = sidebar_index
219 # An "article" might require the api_url_context to be rendered
220 api_url_context: Dict[str, Any] = {}
221 add_api_url_context(api_url_context, self.request)
222 api_url_context["run_content_validators"] = True
223 context["api_url_context"] = api_url_context
224 if endpoint_name and endpoint_method:
225 context["api_url_context"]["API_ENDPOINT_NAME"] = endpoint_name + ":" + endpoint_method
226 add_google_analytics_context(context)
227 return context
228
229 def get(
230 self, request: HttpRequest, *args: object, article: str = "", **kwargs: object
231 ) -> HttpResponse:
232 # Hack: It's hard to reinitialize urls.py from tests, and so
233 # we want to defer the use of settings.POLICIES_DIRECTORY to
234 # runtime.
235 if self.policies_view:
236 self.path_template = f"{settings.POLICIES_DIRECTORY}/%s.md"
237
238 documentation_article = self.get_path(article)
239 http_status = documentation_article.article_http_status
240 result = super().get(request, article=article)
241 if http_status != 200:
242 result.status_code = http_status
243 return result
244
245
246 def add_integrations_context(context: Dict[str, Any]) -> None:
247 alphabetical_sorted_categories = OrderedDict(sorted(CATEGORIES.items()))
248 alphabetical_sorted_integration = OrderedDict(sorted(INTEGRATIONS.items()))
249 enabled_integrations_count = len(list(filter(lambda v: v.is_enabled(), INTEGRATIONS.values())))
250 # Subtract 1 so saying "Over X integrations" is correct. Then,
251 # round down to the nearest multiple of 10.
252 integrations_count_display = ((enabled_integrations_count - 1) // 10) * 10
253 context["categories_dict"] = alphabetical_sorted_categories
254 context["integrations_dict"] = alphabetical_sorted_integration
255 context["integrations_count_display"] = integrations_count_display
256
257
258 def add_integrations_open_graph_context(context: Dict[str, Any], request: HttpRequest) -> None:
259 path_name = request.path.rstrip("/").split("/")[-1]
260 description = (
261 "Zulip comes with over a hundred native integrations out of the box, "
262 "and integrates with Zapier and IFTTT to provide hundreds more. "
263 "Connect the apps you use every day to Zulip."
264 )
265
266 if path_name in INTEGRATIONS:
267 integration = INTEGRATIONS[path_name]
268 context["PAGE_TITLE"] = f"{integration.display_name} | Zulip integrations"
269 context["PAGE_DESCRIPTION"] = description
270
271 elif path_name in CATEGORIES:
272 category = CATEGORIES[path_name]
273 if path_name in META_CATEGORY:
274 context["PAGE_TITLE"] = f"{category} | Zulip integrations"
275 else:
276 context["PAGE_TITLE"] = f"{category} tools | Zulip integrations"
277 context["PAGE_DESCRIPTION"] = description
278
279 elif path_name == "integrations":
280 context["PAGE_TITLE"] = "Zulip integrations"
281 context["PAGE_DESCRIPTION"] = description
282
283
284 class IntegrationView(ApiURLView):
285 template_name = "zerver/integrations/index.html"
286
287 def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
288 context: Dict[str, Any] = super().get_context_data(**kwargs)
289 add_integrations_context(context)
290 add_integrations_open_graph_context(context, self.request)
291 add_google_analytics_context(context)
292 return context
293
294
295 @has_request_variables
296 def integration_doc(request: HttpRequest, integration_name: str = REQ()) -> HttpResponse:
297 # FIXME: This check is jQuery-specific.
298 if request.headers.get("x-requested-with") != "XMLHttpRequest":
299 return HttpResponseNotFound()
300
301 try:
302 integration = INTEGRATIONS[integration_name]
303 except KeyError:
304 return HttpResponseNotFound()
305
306 context: Dict[str, Any] = {}
307 add_api_url_context(context, request)
308
309 context["integration_name"] = integration.name
310 context["integration_display_name"] = integration.display_name
311 context["recommended_stream_name"] = integration.stream_name
312 if isinstance(integration, WebhookIntegration):
313 context["integration_url"] = integration.url[3:]
314 if (
315 hasattr(integration.function, "_all_event_types")
316 and integration.function._all_event_types is not None
317 ):
318 context["all_event_types"] = integration.function._all_event_types
319 if isinstance(integration, HubotIntegration):
320 context["hubot_docs_url"] = integration.hubot_docs_url
321
322 doc_html_str = render_markdown_path(integration.doc, context, integration_doc=True)
323
324 return HttpResponse(doc_html_str)
325
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/views/documentation.py b/zerver/views/documentation.py
--- a/zerver/views/documentation.py
+++ b/zerver/views/documentation.py
@@ -9,6 +9,9 @@
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.template import loader
from django.views.generic import TemplateView
+from lxml import html
+from lxml.etree import Element, SubElement, XPath, _Element
+from markupsafe import Markup
from zerver.context_processors import zulip_default_context
from zerver.decorator import add_google_analytics_context
@@ -65,6 +68,9 @@
return context
+sidebar_links = XPath("//a[@href=$url]")
+
+
class MarkdownDirectoryView(ApiURLView):
path_template = ""
policies_view = False
@@ -215,7 +221,6 @@
)
context["PAGE_DESCRIPTION"] = request_notes.placeholder_open_graph_description
- context["sidebar_index"] = sidebar_index
# An "article" might require the api_url_context to be rendered
api_url_context: Dict[str, Any] = {}
add_api_url_context(api_url_context, self.request)
@@ -223,6 +228,28 @@
context["api_url_context"] = api_url_context
if endpoint_name and endpoint_method:
context["api_url_context"]["API_ENDPOINT_NAME"] = endpoint_name + ":" + endpoint_method
+
+ sidebar_html = render_markdown_path(sidebar_index)
+ tree = html.fragment_fromstring(sidebar_html, create_parent=True)
+ if not context.get("page_is_policy_center", False):
+ home_h1 = Element("h1")
+ home_link = SubElement(home_h1, "a")
+ home_link.attrib["class"] = "no-underline"
+ home_link.attrib["href"] = context["doc_root"]
+ home_link.text = context["doc_root_title"] + " home"
+ tree.insert(0, home_h1)
+ url = context["doc_root"] + article
+ # Highlight current article link
+ links = sidebar_links(tree, url=url)
+ assert isinstance(links, list)
+ for a in links:
+ assert isinstance(a, _Element)
+ old_class = a.attrib.get("class", "")
+ assert isinstance(old_class, str)
+ a.attrib["class"] = old_class + " highlighted"
+ sidebar_html = "".join(html.tostring(child, encoding="unicode") for child in tree)
+ context["sidebar_html"] = Markup(sidebar_html)
+
add_google_analytics_context(context)
return context
| {"golden_diff": "diff --git a/zerver/views/documentation.py b/zerver/views/documentation.py\n--- a/zerver/views/documentation.py\n+++ b/zerver/views/documentation.py\n@@ -9,6 +9,9 @@\n from django.http import HttpRequest, HttpResponse, HttpResponseNotFound\n from django.template import loader\n from django.views.generic import TemplateView\n+from lxml import html\n+from lxml.etree import Element, SubElement, XPath, _Element\n+from markupsafe import Markup\n \n from zerver.context_processors import zulip_default_context\n from zerver.decorator import add_google_analytics_context\n@@ -65,6 +68,9 @@\n return context\n \n \n+sidebar_links = XPath(\"//a[@href=$url]\")\n+\n+\n class MarkdownDirectoryView(ApiURLView):\n path_template = \"\"\n policies_view = False\n@@ -215,7 +221,6 @@\n )\n context[\"PAGE_DESCRIPTION\"] = request_notes.placeholder_open_graph_description\n \n- context[\"sidebar_index\"] = sidebar_index\n # An \"article\" might require the api_url_context to be rendered\n api_url_context: Dict[str, Any] = {}\n add_api_url_context(api_url_context, self.request)\n@@ -223,6 +228,28 @@\n context[\"api_url_context\"] = api_url_context\n if endpoint_name and endpoint_method:\n context[\"api_url_context\"][\"API_ENDPOINT_NAME\"] = endpoint_name + \":\" + endpoint_method\n+\n+ sidebar_html = render_markdown_path(sidebar_index)\n+ tree = html.fragment_fromstring(sidebar_html, create_parent=True)\n+ if not context.get(\"page_is_policy_center\", False):\n+ home_h1 = Element(\"h1\")\n+ home_link = SubElement(home_h1, \"a\")\n+ home_link.attrib[\"class\"] = \"no-underline\"\n+ home_link.attrib[\"href\"] = context[\"doc_root\"]\n+ home_link.text = context[\"doc_root_title\"] + \" home\"\n+ tree.insert(0, home_h1)\n+ url = context[\"doc_root\"] + article\n+ # Highlight current article link\n+ links = sidebar_links(tree, url=url)\n+ assert isinstance(links, list)\n+ for a in links:\n+ assert isinstance(a, _Element)\n+ old_class = a.attrib.get(\"class\", \"\")\n+ assert isinstance(old_class, str)\n+ a.attrib[\"class\"] = old_class + \" highlighted\"\n+ sidebar_html = \"\".join(html.tostring(child, encoding=\"unicode\") for child in tree)\n+ context[\"sidebar_html\"] = Markup(sidebar_html)\n+\n add_google_analytics_context(context)\n return context\n", "issue": "scrollToHash can raise an exception if the browser does not support it\nIf the browser is Safari (or Google's webcrawler) and the page was loaded with an anchor of the form `#:~:text=something`, then calling `scrollToHash` will raise `Error: Syntax error, unrecognized expression` here:\r\n\r\nhttps://github.com/zulip/zulip/blob/1676d0b638d91680badc474190a08a52c8dc7a36/web/src/portico/help.js#L112\r\n\r\nWe should catch it and do nothing, in that case.\n", "before_files": [{"content": "import os\nimport random\nimport re\nfrom collections import OrderedDict\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Optional\n\nfrom django.conf import settings\nfrom django.http import HttpRequest, HttpResponse, HttpResponseNotFound\nfrom django.template import loader\nfrom django.views.generic import TemplateView\n\nfrom zerver.context_processors import zulip_default_context\nfrom zerver.decorator import add_google_analytics_context\nfrom zerver.lib.integrations import (\n CATEGORIES,\n INTEGRATIONS,\n META_CATEGORY,\n HubotIntegration,\n WebhookIntegration,\n)\nfrom zerver.lib.request import REQ, RequestNotes, has_request_variables\nfrom zerver.lib.subdomains import get_subdomain\nfrom zerver.lib.templates import 
render_markdown_path\nfrom zerver.models import Realm\nfrom zerver.openapi.openapi import get_endpoint_from_operationid, get_openapi_summary\n\n\n@dataclass\nclass DocumentationArticle:\n article_path: str\n article_http_status: int\n endpoint_path: Optional[str]\n endpoint_method: Optional[str]\n\n\ndef add_api_url_context(context: Dict[str, Any], request: HttpRequest) -> None:\n context.update(zulip_default_context(request))\n\n subdomain = get_subdomain(request)\n if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN or not settings.ROOT_DOMAIN_LANDING_PAGE:\n display_subdomain = subdomain\n html_settings_links = True\n else:\n display_subdomain = \"yourZulipDomain\"\n html_settings_links = False\n\n display_host = Realm.host_for_subdomain(display_subdomain)\n api_url_scheme_relative = display_host + \"/api\"\n api_url = settings.EXTERNAL_URI_SCHEME + api_url_scheme_relative\n zulip_url = settings.EXTERNAL_URI_SCHEME + display_host\n\n context[\"external_url_scheme\"] = settings.EXTERNAL_URI_SCHEME\n context[\"api_url\"] = api_url\n context[\"api_url_scheme_relative\"] = api_url_scheme_relative\n context[\"zulip_url\"] = zulip_url\n\n context[\"html_settings_links\"] = html_settings_links\n\n\nclass ApiURLView(TemplateView):\n def get_context_data(self, **kwargs: Any) -> Dict[str, str]:\n context = super().get_context_data(**kwargs)\n add_api_url_context(context, self.request)\n return context\n\n\nclass MarkdownDirectoryView(ApiURLView):\n path_template = \"\"\n policies_view = False\n help_view = False\n api_doc_view = False\n\n def get_path(self, article: str) -> DocumentationArticle:\n http_status = 200\n if article == \"\":\n article = \"index\"\n elif article == \"include/sidebar_index\":\n pass\n elif article == \"api-doc-template\":\n # This markdown template shouldn't be accessed directly.\n article = \"missing\"\n http_status = 404\n elif \"/\" in article:\n article = \"missing\"\n http_status = 404\n elif len(article) > 100 or not re.match(\"^[0-9a-zA-Z_-]+$\", article):\n article = \"missing\"\n http_status = 404\n\n path = self.path_template % (article,)\n endpoint_name = None\n endpoint_method = None\n\n if not self.path_template.startswith(\"/\"):\n # Relative paths only used for policies documentation\n # when it is not configured or in the dev environment\n assert self.policies_view\n\n try:\n loader.get_template(path)\n return DocumentationArticle(\n article_path=path,\n article_http_status=http_status,\n endpoint_path=endpoint_name,\n endpoint_method=endpoint_method,\n )\n except loader.TemplateDoesNotExist:\n return DocumentationArticle(\n article_path=self.path_template % (\"missing\",),\n article_http_status=404,\n endpoint_path=None,\n endpoint_method=None,\n )\n\n if not os.path.exists(path):\n if self.api_doc_view:\n try:\n # API endpoints documented in zerver/openapi/zulip.yaml\n endpoint_name, endpoint_method = get_endpoint_from_operationid(article)\n path = self.path_template % (\"api-doc-template\",)\n except AssertionError:\n return DocumentationArticle(\n article_path=self.path_template % (\"missing\",),\n article_http_status=404,\n endpoint_path=None,\n endpoint_method=None,\n )\n elif self.help_view or self.policies_view:\n article = \"missing\"\n http_status = 404\n path = self.path_template % (article,)\n else:\n raise AssertionError(\"Invalid documentation view type\")\n\n return DocumentationArticle(\n article_path=path,\n article_http_status=http_status,\n endpoint_path=endpoint_name,\n endpoint_method=endpoint_method,\n )\n\n def get_context_data(self, 
**kwargs: Any) -> Dict[str, Any]:\n article = kwargs[\"article\"]\n context: Dict[str, Any] = super().get_context_data()\n\n documentation_article = self.get_path(article)\n context[\"article\"] = documentation_article.article_path\n not_index_page = not context[\"article\"].endswith(\"/index.md\")\n\n if documentation_article.article_path.startswith(\"/\") and os.path.exists(\n documentation_article.article_path\n ):\n # Absolute path case\n article_absolute_path = documentation_article.article_path\n else:\n # Relative path case\n article_absolute_path = os.path.join(\n settings.DEPLOY_ROOT, \"templates\", documentation_article.article_path\n )\n\n if self.help_view:\n context[\"page_is_help_center\"] = True\n context[\"doc_root\"] = \"/help/\"\n context[\"doc_root_title\"] = \"Help center\"\n sidebar_article = self.get_path(\"include/sidebar_index\")\n sidebar_index = sidebar_article.article_path\n title_base = \"Zulip help center\"\n elif self.policies_view:\n context[\"page_is_policy_center\"] = True\n context[\"doc_root\"] = \"/policies/\"\n context[\"doc_root_title\"] = \"Terms and policies\"\n sidebar_article = self.get_path(\"sidebar_index\")\n sidebar_index = sidebar_article.article_path\n title_base = \"Zulip terms and policies\"\n elif self.api_doc_view:\n context[\"page_is_api_center\"] = True\n context[\"doc_root\"] = \"/api/\"\n context[\"doc_root_title\"] = \"API documentation\"\n sidebar_article = self.get_path(\"sidebar_index\")\n sidebar_index = sidebar_article.article_path\n title_base = \"Zulip API documentation\"\n else:\n raise AssertionError(\"Invalid documentation view type\")\n\n # The following is a somewhat hacky approach to extract titles from articles.\n endpoint_name = None\n endpoint_method = None\n if os.path.exists(article_absolute_path):\n with open(article_absolute_path) as article_file:\n first_line = article_file.readlines()[0]\n if self.api_doc_view and context[\"article\"].endswith(\"api-doc-template.md\"):\n endpoint_name, endpoint_method = (\n documentation_article.endpoint_path,\n documentation_article.endpoint_method,\n )\n assert endpoint_name is not None\n assert endpoint_method is not None\n article_title = get_openapi_summary(endpoint_name, endpoint_method)\n elif self.api_doc_view and \"{generate_api_header(\" in first_line:\n api_operation = context[\"PAGE_METADATA_URL\"].split(\"/api/\")[1]\n endpoint_name, endpoint_method = get_endpoint_from_operationid(api_operation)\n article_title = get_openapi_summary(endpoint_name, endpoint_method)\n else:\n # Strip the header and then use the first line to get the article title\n article_title = first_line.lstrip(\"#\").strip()\n endpoint_name = endpoint_method = None\n if not_index_page:\n context[\"PAGE_TITLE\"] = f\"{article_title} | {title_base}\"\n else:\n context[\"PAGE_TITLE\"] = title_base\n request_notes = RequestNotes.get_notes(self.request)\n request_notes.placeholder_open_graph_description = (\n f\"REPLACEMENT_PAGE_DESCRIPTION_{int(2**24 * random.random())}\"\n )\n context[\"PAGE_DESCRIPTION\"] = request_notes.placeholder_open_graph_description\n\n context[\"sidebar_index\"] = sidebar_index\n # An \"article\" might require the api_url_context to be rendered\n api_url_context: Dict[str, Any] = {}\n add_api_url_context(api_url_context, self.request)\n api_url_context[\"run_content_validators\"] = True\n context[\"api_url_context\"] = api_url_context\n if endpoint_name and endpoint_method:\n context[\"api_url_context\"][\"API_ENDPOINT_NAME\"] = endpoint_name + \":\" + endpoint_method\n 
add_google_analytics_context(context)\n return context\n\n def get(\n self, request: HttpRequest, *args: object, article: str = \"\", **kwargs: object\n ) -> HttpResponse:\n # Hack: It's hard to reinitialize urls.py from tests, and so\n # we want to defer the use of settings.POLICIES_DIRECTORY to\n # runtime.\n if self.policies_view:\n self.path_template = f\"{settings.POLICIES_DIRECTORY}/%s.md\"\n\n documentation_article = self.get_path(article)\n http_status = documentation_article.article_http_status\n result = super().get(request, article=article)\n if http_status != 200:\n result.status_code = http_status\n return result\n\n\ndef add_integrations_context(context: Dict[str, Any]) -> None:\n alphabetical_sorted_categories = OrderedDict(sorted(CATEGORIES.items()))\n alphabetical_sorted_integration = OrderedDict(sorted(INTEGRATIONS.items()))\n enabled_integrations_count = len(list(filter(lambda v: v.is_enabled(), INTEGRATIONS.values())))\n # Subtract 1 so saying \"Over X integrations\" is correct. Then,\n # round down to the nearest multiple of 10.\n integrations_count_display = ((enabled_integrations_count - 1) // 10) * 10\n context[\"categories_dict\"] = alphabetical_sorted_categories\n context[\"integrations_dict\"] = alphabetical_sorted_integration\n context[\"integrations_count_display\"] = integrations_count_display\n\n\ndef add_integrations_open_graph_context(context: Dict[str, Any], request: HttpRequest) -> None:\n path_name = request.path.rstrip(\"/\").split(\"/\")[-1]\n description = (\n \"Zulip comes with over a hundred native integrations out of the box, \"\n \"and integrates with Zapier and IFTTT to provide hundreds more. \"\n \"Connect the apps you use every day to Zulip.\"\n )\n\n if path_name in INTEGRATIONS:\n integration = INTEGRATIONS[path_name]\n context[\"PAGE_TITLE\"] = f\"{integration.display_name} | Zulip integrations\"\n context[\"PAGE_DESCRIPTION\"] = description\n\n elif path_name in CATEGORIES:\n category = CATEGORIES[path_name]\n if path_name in META_CATEGORY:\n context[\"PAGE_TITLE\"] = f\"{category} | Zulip integrations\"\n else:\n context[\"PAGE_TITLE\"] = f\"{category} tools | Zulip integrations\"\n context[\"PAGE_DESCRIPTION\"] = description\n\n elif path_name == \"integrations\":\n context[\"PAGE_TITLE\"] = \"Zulip integrations\"\n context[\"PAGE_DESCRIPTION\"] = description\n\n\nclass IntegrationView(ApiURLView):\n template_name = \"zerver/integrations/index.html\"\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context: Dict[str, Any] = super().get_context_data(**kwargs)\n add_integrations_context(context)\n add_integrations_open_graph_context(context, self.request)\n add_google_analytics_context(context)\n return context\n\n\n@has_request_variables\ndef integration_doc(request: HttpRequest, integration_name: str = REQ()) -> HttpResponse:\n # FIXME: This check is jQuery-specific.\n if request.headers.get(\"x-requested-with\") != \"XMLHttpRequest\":\n return HttpResponseNotFound()\n\n try:\n integration = INTEGRATIONS[integration_name]\n except KeyError:\n return HttpResponseNotFound()\n\n context: Dict[str, Any] = {}\n add_api_url_context(context, request)\n\n context[\"integration_name\"] = integration.name\n context[\"integration_display_name\"] = integration.display_name\n context[\"recommended_stream_name\"] = integration.stream_name\n if isinstance(integration, WebhookIntegration):\n context[\"integration_url\"] = integration.url[3:]\n if (\n hasattr(integration.function, \"_all_event_types\")\n and 
integration.function._all_event_types is not None\n ):\n context[\"all_event_types\"] = integration.function._all_event_types\n if isinstance(integration, HubotIntegration):\n context[\"hubot_docs_url\"] = integration.hubot_docs_url\n\n doc_html_str = render_markdown_path(integration.doc, context, integration_doc=True)\n\n return HttpResponse(doc_html_str)\n", "path": "zerver/views/documentation.py"}], "after_files": [{"content": "import os\nimport random\nimport re\nfrom collections import OrderedDict\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Optional\n\nfrom django.conf import settings\nfrom django.http import HttpRequest, HttpResponse, HttpResponseNotFound\nfrom django.template import loader\nfrom django.views.generic import TemplateView\nfrom lxml import html\nfrom lxml.etree import Element, SubElement, XPath, _Element\nfrom markupsafe import Markup\n\nfrom zerver.context_processors import zulip_default_context\nfrom zerver.decorator import add_google_analytics_context\nfrom zerver.lib.integrations import (\n CATEGORIES,\n INTEGRATIONS,\n META_CATEGORY,\n HubotIntegration,\n WebhookIntegration,\n)\nfrom zerver.lib.request import REQ, RequestNotes, has_request_variables\nfrom zerver.lib.subdomains import get_subdomain\nfrom zerver.lib.templates import render_markdown_path\nfrom zerver.models import Realm\nfrom zerver.openapi.openapi import get_endpoint_from_operationid, get_openapi_summary\n\n\n@dataclass\nclass DocumentationArticle:\n article_path: str\n article_http_status: int\n endpoint_path: Optional[str]\n endpoint_method: Optional[str]\n\n\ndef add_api_url_context(context: Dict[str, Any], request: HttpRequest) -> None:\n context.update(zulip_default_context(request))\n\n subdomain = get_subdomain(request)\n if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN or not settings.ROOT_DOMAIN_LANDING_PAGE:\n display_subdomain = subdomain\n html_settings_links = True\n else:\n display_subdomain = \"yourZulipDomain\"\n html_settings_links = False\n\n display_host = Realm.host_for_subdomain(display_subdomain)\n api_url_scheme_relative = display_host + \"/api\"\n api_url = settings.EXTERNAL_URI_SCHEME + api_url_scheme_relative\n zulip_url = settings.EXTERNAL_URI_SCHEME + display_host\n\n context[\"external_url_scheme\"] = settings.EXTERNAL_URI_SCHEME\n context[\"api_url\"] = api_url\n context[\"api_url_scheme_relative\"] = api_url_scheme_relative\n context[\"zulip_url\"] = zulip_url\n\n context[\"html_settings_links\"] = html_settings_links\n\n\nclass ApiURLView(TemplateView):\n def get_context_data(self, **kwargs: Any) -> Dict[str, str]:\n context = super().get_context_data(**kwargs)\n add_api_url_context(context, self.request)\n return context\n\n\nsidebar_links = XPath(\"//a[@href=$url]\")\n\n\nclass MarkdownDirectoryView(ApiURLView):\n path_template = \"\"\n policies_view = False\n help_view = False\n api_doc_view = False\n\n def get_path(self, article: str) -> DocumentationArticle:\n http_status = 200\n if article == \"\":\n article = \"index\"\n elif article == \"include/sidebar_index\":\n pass\n elif article == \"api-doc-template\":\n # This markdown template shouldn't be accessed directly.\n article = \"missing\"\n http_status = 404\n elif \"/\" in article:\n article = \"missing\"\n http_status = 404\n elif len(article) > 100 or not re.match(\"^[0-9a-zA-Z_-]+$\", article):\n article = \"missing\"\n http_status = 404\n\n path = self.path_template % (article,)\n endpoint_name = None\n endpoint_method = None\n\n if not self.path_template.startswith(\"/\"):\n # 
Relative paths only used for policies documentation\n # when it is not configured or in the dev environment\n assert self.policies_view\n\n try:\n loader.get_template(path)\n return DocumentationArticle(\n article_path=path,\n article_http_status=http_status,\n endpoint_path=endpoint_name,\n endpoint_method=endpoint_method,\n )\n except loader.TemplateDoesNotExist:\n return DocumentationArticle(\n article_path=self.path_template % (\"missing\",),\n article_http_status=404,\n endpoint_path=None,\n endpoint_method=None,\n )\n\n if not os.path.exists(path):\n if self.api_doc_view:\n try:\n # API endpoints documented in zerver/openapi/zulip.yaml\n endpoint_name, endpoint_method = get_endpoint_from_operationid(article)\n path = self.path_template % (\"api-doc-template\",)\n except AssertionError:\n return DocumentationArticle(\n article_path=self.path_template % (\"missing\",),\n article_http_status=404,\n endpoint_path=None,\n endpoint_method=None,\n )\n elif self.help_view or self.policies_view:\n article = \"missing\"\n http_status = 404\n path = self.path_template % (article,)\n else:\n raise AssertionError(\"Invalid documentation view type\")\n\n return DocumentationArticle(\n article_path=path,\n article_http_status=http_status,\n endpoint_path=endpoint_name,\n endpoint_method=endpoint_method,\n )\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n article = kwargs[\"article\"]\n context: Dict[str, Any] = super().get_context_data()\n\n documentation_article = self.get_path(article)\n context[\"article\"] = documentation_article.article_path\n not_index_page = not context[\"article\"].endswith(\"/index.md\")\n\n if documentation_article.article_path.startswith(\"/\") and os.path.exists(\n documentation_article.article_path\n ):\n # Absolute path case\n article_absolute_path = documentation_article.article_path\n else:\n # Relative path case\n article_absolute_path = os.path.join(\n settings.DEPLOY_ROOT, \"templates\", documentation_article.article_path\n )\n\n if self.help_view:\n context[\"page_is_help_center\"] = True\n context[\"doc_root\"] = \"/help/\"\n context[\"doc_root_title\"] = \"Help center\"\n sidebar_article = self.get_path(\"include/sidebar_index\")\n sidebar_index = sidebar_article.article_path\n title_base = \"Zulip help center\"\n elif self.policies_view:\n context[\"page_is_policy_center\"] = True\n context[\"doc_root\"] = \"/policies/\"\n context[\"doc_root_title\"] = \"Terms and policies\"\n sidebar_article = self.get_path(\"sidebar_index\")\n sidebar_index = sidebar_article.article_path\n title_base = \"Zulip terms and policies\"\n elif self.api_doc_view:\n context[\"page_is_api_center\"] = True\n context[\"doc_root\"] = \"/api/\"\n context[\"doc_root_title\"] = \"API documentation\"\n sidebar_article = self.get_path(\"sidebar_index\")\n sidebar_index = sidebar_article.article_path\n title_base = \"Zulip API documentation\"\n else:\n raise AssertionError(\"Invalid documentation view type\")\n\n # The following is a somewhat hacky approach to extract titles from articles.\n endpoint_name = None\n endpoint_method = None\n if os.path.exists(article_absolute_path):\n with open(article_absolute_path) as article_file:\n first_line = article_file.readlines()[0]\n if self.api_doc_view and context[\"article\"].endswith(\"api-doc-template.md\"):\n endpoint_name, endpoint_method = (\n documentation_article.endpoint_path,\n documentation_article.endpoint_method,\n )\n assert endpoint_name is not None\n assert endpoint_method is not None\n article_title = 
get_openapi_summary(endpoint_name, endpoint_method)\n elif self.api_doc_view and \"{generate_api_header(\" in first_line:\n api_operation = context[\"PAGE_METADATA_URL\"].split(\"/api/\")[1]\n endpoint_name, endpoint_method = get_endpoint_from_operationid(api_operation)\n article_title = get_openapi_summary(endpoint_name, endpoint_method)\n else:\n # Strip the header and then use the first line to get the article title\n article_title = first_line.lstrip(\"#\").strip()\n endpoint_name = endpoint_method = None\n if not_index_page:\n context[\"PAGE_TITLE\"] = f\"{article_title} | {title_base}\"\n else:\n context[\"PAGE_TITLE\"] = title_base\n request_notes = RequestNotes.get_notes(self.request)\n request_notes.placeholder_open_graph_description = (\n f\"REPLACEMENT_PAGE_DESCRIPTION_{int(2**24 * random.random())}\"\n )\n context[\"PAGE_DESCRIPTION\"] = request_notes.placeholder_open_graph_description\n\n # An \"article\" might require the api_url_context to be rendered\n api_url_context: Dict[str, Any] = {}\n add_api_url_context(api_url_context, self.request)\n api_url_context[\"run_content_validators\"] = True\n context[\"api_url_context\"] = api_url_context\n if endpoint_name and endpoint_method:\n context[\"api_url_context\"][\"API_ENDPOINT_NAME\"] = endpoint_name + \":\" + endpoint_method\n\n sidebar_html = render_markdown_path(sidebar_index)\n tree = html.fragment_fromstring(sidebar_html, create_parent=True)\n if not context.get(\"page_is_policy_center\", False):\n home_h1 = Element(\"h1\")\n home_link = SubElement(home_h1, \"a\")\n home_link.attrib[\"class\"] = \"no-underline\"\n home_link.attrib[\"href\"] = context[\"doc_root\"]\n home_link.text = context[\"doc_root_title\"] + \" home\"\n tree.insert(0, home_h1)\n url = context[\"doc_root\"] + article\n # Highlight current article link\n links = sidebar_links(tree, url=url)\n assert isinstance(links, list)\n for a in links:\n assert isinstance(a, _Element)\n old_class = a.attrib.get(\"class\", \"\")\n assert isinstance(old_class, str)\n a.attrib[\"class\"] = old_class + \" highlighted\"\n sidebar_html = \"\".join(html.tostring(child, encoding=\"unicode\") for child in tree)\n context[\"sidebar_html\"] = Markup(sidebar_html)\n\n add_google_analytics_context(context)\n return context\n\n def get(\n self, request: HttpRequest, *args: object, article: str = \"\", **kwargs: object\n ) -> HttpResponse:\n # Hack: It's hard to reinitialize urls.py from tests, and so\n # we want to defer the use of settings.POLICIES_DIRECTORY to\n # runtime.\n if self.policies_view:\n self.path_template = f\"{settings.POLICIES_DIRECTORY}/%s.md\"\n\n documentation_article = self.get_path(article)\n http_status = documentation_article.article_http_status\n result = super().get(request, article=article)\n if http_status != 200:\n result.status_code = http_status\n return result\n\n\ndef add_integrations_context(context: Dict[str, Any]) -> None:\n alphabetical_sorted_categories = OrderedDict(sorted(CATEGORIES.items()))\n alphabetical_sorted_integration = OrderedDict(sorted(INTEGRATIONS.items()))\n enabled_integrations_count = len(list(filter(lambda v: v.is_enabled(), INTEGRATIONS.values())))\n # Subtract 1 so saying \"Over X integrations\" is correct. 
Then,\n # round down to the nearest multiple of 10.\n integrations_count_display = ((enabled_integrations_count - 1) // 10) * 10\n context[\"categories_dict\"] = alphabetical_sorted_categories\n context[\"integrations_dict\"] = alphabetical_sorted_integration\n context[\"integrations_count_display\"] = integrations_count_display\n\n\ndef add_integrations_open_graph_context(context: Dict[str, Any], request: HttpRequest) -> None:\n path_name = request.path.rstrip(\"/\").split(\"/\")[-1]\n description = (\n \"Zulip comes with over a hundred native integrations out of the box, \"\n \"and integrates with Zapier and IFTTT to provide hundreds more. \"\n \"Connect the apps you use every day to Zulip.\"\n )\n\n if path_name in INTEGRATIONS:\n integration = INTEGRATIONS[path_name]\n context[\"PAGE_TITLE\"] = f\"{integration.display_name} | Zulip integrations\"\n context[\"PAGE_DESCRIPTION\"] = description\n\n elif path_name in CATEGORIES:\n category = CATEGORIES[path_name]\n if path_name in META_CATEGORY:\n context[\"PAGE_TITLE\"] = f\"{category} | Zulip integrations\"\n else:\n context[\"PAGE_TITLE\"] = f\"{category} tools | Zulip integrations\"\n context[\"PAGE_DESCRIPTION\"] = description\n\n elif path_name == \"integrations\":\n context[\"PAGE_TITLE\"] = \"Zulip integrations\"\n context[\"PAGE_DESCRIPTION\"] = description\n\n\nclass IntegrationView(ApiURLView):\n template_name = \"zerver/integrations/index.html\"\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context: Dict[str, Any] = super().get_context_data(**kwargs)\n add_integrations_context(context)\n add_integrations_open_graph_context(context, self.request)\n add_google_analytics_context(context)\n return context\n\n\n@has_request_variables\ndef integration_doc(request: HttpRequest, integration_name: str = REQ()) -> HttpResponse:\n # FIXME: This check is jQuery-specific.\n if request.headers.get(\"x-requested-with\") != \"XMLHttpRequest\":\n return HttpResponseNotFound()\n\n try:\n integration = INTEGRATIONS[integration_name]\n except KeyError:\n return HttpResponseNotFound()\n\n context: Dict[str, Any] = {}\n add_api_url_context(context, request)\n\n context[\"integration_name\"] = integration.name\n context[\"integration_display_name\"] = integration.display_name\n context[\"recommended_stream_name\"] = integration.stream_name\n if isinstance(integration, WebhookIntegration):\n context[\"integration_url\"] = integration.url[3:]\n if (\n hasattr(integration.function, \"_all_event_types\")\n and integration.function._all_event_types is not None\n ):\n context[\"all_event_types\"] = integration.function._all_event_types\n if isinstance(integration, HubotIntegration):\n context[\"hubot_docs_url\"] = integration.hubot_docs_url\n\n doc_html_str = render_markdown_path(integration.doc, context, integration_doc=True)\n\n return HttpResponse(doc_html_str)\n", "path": "zerver/views/documentation.py"}]} | 3,993 | 568 |
gh_patches_debug_4164 | rasdani/github-patches | git_diff | lutris__lutris-1859 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_children_pids_of_thread(): process.py:57:get_children_pids_of_thread:ProcessLookupError: [Errno 3] No such process: '/proc/25892/task/25892/children'
Here's another Fedora report.
https://bugzilla.redhat.com/show_bug.cgi?id=1686988
cmdline: /usr/bin/python3 -s /usr/bin/lutris-wrapper 0 0 /home/u13881/.local/share/lutris/runtime/winetricks/winetricks --unattended
crash_function: get_children_pids_of_thread
exception_type: ProcessLookupError
executable: /usr/bin/lutris-wrapper
interpreter: python3-3.7.2-4.fc29.x86_64
kernel: 4.20.13-200.fc29.x86_64
runlevel: N 5
type: Python3
uid: 1000
Truncated backtrace:
process.py:57:get_children_pids_of_thread:ProcessLookupError: [Errno 3] No such process: '/proc/25892/task/25892/children'
Traceback (most recent call last):
File "/usr/bin/lutris-wrapper", line 110, in <module>
main()
File "/usr/bin/lutris-wrapper", line 91, in main
if not monitor.refresh_process_status():
File "/usr/lib/python3.7/site-packages/lutris/util/monitor.py", line 98, in refresh_process_status
for child in self.iter_children(Process(os.getpid())):
File "/usr/lib/python3.7/site-packages/lutris/util/process.py", line 20, in __init__
self.get_children()
File "/usr/lib/python3.7/site-packages/lutris/util/process.py", line 67, in get_children
self.children.append(Process(child_pid, parent=self))
File "/usr/lib/python3.7/site-packages/lutris/util/process.py", line 20, in __init__
self.get_children()
File "/usr/lib/python3.7/site-packages/lutris/util/process.py", line 67, in get_children
self.children.append(Process(child_pid, parent=self))
File "/usr/lib/python3.7/site-packages/lutris/util/process.py", line 20, in __init__
self.get_children()
File "/usr/lib/python3.7/site-packages/lutris/util/process.py", line 66, in get_children
for child_pid in self.get_children_pids_of_thread(tid):
File "/usr/lib/python3.7/site-packages/lutris/util/process.py", line 57, in get_children_pids_of_thread
with open(children_path) as children_file:
ProcessLookupError: [Errno 3] No such process: '/proc/25892/task/25892/children'
Local variables in innermost frame:
self: Process 25892
tid: '25892'
children_path: '/proc/25892/task/25892/children'
--- END ISSUE ---
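For context, the crash is a time-of-check/time-of-use race: a thread listed under `/proc/<pid>/task/` can exit before its `children` file is opened, and the kernel then reports ESRCH, which Python raises as `ProcessLookupError` rather than `FileNotFoundError` — so the existing `except FileNotFoundError` never fires. Below is a minimal standalone sketch of a defensive read (the function name is illustrative, not Lutris code); it mirrors the one-line fix in the golden diff further down.
```python
import os


def children_of_thread(pid, tid):
    """Read /proc/<pid>/task/<tid>/children, tolerating threads that vanish
    between being listed and being opened (either error simply means there
    are no children left to report)."""
    children_path = "/proc/{}/task/{}/children".format(pid, tid)
    try:
        with open(children_path) as children_file:
            content = children_file.read()
    except (FileNotFoundError, ProcessLookupError):
        content = ""
    return content.strip().split()


if __name__ == "__main__":
    # Reading our own main thread is always valid; with no child processes
    # it prints an empty list.
    print(children_of_thread(os.getpid(), os.getpid()))
```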
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/process.py`
Content:
```
1 """Class to manipulate a process"""
2 import os
3 from lutris.util.log import logger
4 from lutris.util.system import path_exists
5
6
7 class InvalidPid(Exception):
8 """Exception raised when an operation on a non-existent PID is called"""
9
10
11 class Process:
12 """Python abstraction a Linux process"""
13 def __init__(self, pid, parent=None):
14 try:
15 self.pid = int(pid)
16 except ValueError:
17 raise InvalidPid("'%s' is not a valid pid" % pid)
18 self.children = []
19 self.parent = None
20 self.get_children()
21
22 def __repr__(self):
23 return "Process {}".format(self.pid)
24
25 def __str__(self):
26 return "{} ({}:{})".format(self.name, self.pid, self.state)
27
28 def get_stat(self, parsed=True):
29 stat_filename = "/proc/{}/stat".format(self.pid)
30 if not path_exists(stat_filename):
31 return None
32 with open(stat_filename) as stat_file:
33 try:
34 _stat = stat_file.readline()
35 except (ProcessLookupError, FileNotFoundError):
36 logger.warning("Unable to read stat for process %s", self.pid)
37 return None
38 if parsed:
39 return _stat[_stat.rfind(")") + 1:].split()
40 return _stat
41
42 def get_thread_ids(self):
43 """Return a list of thread ids opened by process."""
44 basedir = "/proc/{}/task/".format(self.pid)
45 if os.path.isdir(basedir):
46 try:
47 return [tid for tid in os.listdir(basedir)]
48 except FileNotFoundError:
49 return []
50 else:
51 return []
52
53 def get_children_pids_of_thread(self, tid):
54 """Return pids of child processes opened by thread `tid` of process."""
55 children_path = "/proc/{}/task/{}/children".format(self.pid, tid)
56 try:
57 with open(children_path) as children_file:
58 children_content = children_file.read()
59 except FileNotFoundError:
60 children_content = ""
61 return children_content.strip().split()
62
63 def get_children(self):
64 self.children = []
65 for tid in self.get_thread_ids():
66 for child_pid in self.get_children_pids_of_thread(tid):
67 self.children.append(Process(child_pid, parent=self))
68
69 @property
70 def name(self):
71 """Filename of the executable."""
72 _stat = self.get_stat(parsed=False)
73 if _stat:
74 return _stat[_stat.find("(") + 1:_stat.rfind(")")]
75 return None
76
77 @property
78 def state(self):
79 """One character from the string "RSDZTW" where R is running, S is
80 sleeping in an interruptible wait, D is waiting in uninterruptible disk
81 sleep, Z is zombie, T is traced or stopped (on a signal), and W is
82 paging.
83 """
84 _stat = self.get_stat()
85 if _stat:
86 return _stat[0]
87 return None
88
89 @property
90 def cmdline(self):
91 """Return command line used to run the process `pid`."""
92 cmdline_path = "/proc/{}/cmdline".format(self.pid)
93 with open(cmdline_path) as cmdline_file:
94 _cmdline = cmdline_file.read().replace("\x00", " ")
95 return _cmdline
96
97 @property
98 def cwd(self):
99 """Return current working dir of process"""
100 cwd_path = "/proc/%d/cwd" % int(self.pid)
101 return os.readlink(cwd_path)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/util/process.py b/lutris/util/process.py
--- a/lutris/util/process.py
+++ b/lutris/util/process.py
@@ -56,7 +56,7 @@
try:
with open(children_path) as children_file:
children_content = children_file.read()
- except FileNotFoundError:
+ except (FileNotFoundError, ProcessLookupError):
children_content = ""
return children_content.strip().split()
| {"golden_diff": "diff --git a/lutris/util/process.py b/lutris/util/process.py\n--- a/lutris/util/process.py\n+++ b/lutris/util/process.py\n@@ -56,7 +56,7 @@\n try:\n with open(children_path) as children_file:\n children_content = children_file.read()\n- except FileNotFoundError:\n+ except (FileNotFoundError, ProcessLookupError):\n children_content = \"\"\n return children_content.strip().split()\n", "issue": "get_children_pids_of_thread(): process.py:57:get_children_pids_of_thread:ProcessLookupError: [Errno 3] No such process: '/proc/25892/task/25892/children'\nHere's another Fedora report.\r\n\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=1686988\r\n\r\ncmdline: /usr/bin/python3 -s /usr/bin/lutris-wrapper 0 0 /home/u13881/.local/share/lutris/runtime/winetricks/winetricks --unattended\r\ncrash_function: get_children_pids_of_thread\r\nexception_type: ProcessLookupError\r\nexecutable: /usr/bin/lutris-wrapper\r\ninterpreter: python3-3.7.2-4.fc29.x86_64\r\nkernel: 4.20.13-200.fc29.x86_64\r\nrunlevel: N 5\r\ntype: Python3\r\nuid: 1000\r\n\r\nTruncated backtrace:\r\nprocess.py:57:get_children_pids_of_thread:ProcessLookupError: [Errno 3] No such process: '/proc/25892/task/25892/children'\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/lutris-wrapper\", line 110, in <module>\r\n main()\r\n File \"/usr/bin/lutris-wrapper\", line 91, in main\r\n if not monitor.refresh_process_status():\r\n File \"/usr/lib/python3.7/site-packages/lutris/util/monitor.py\", line 98, in refresh_process_status\r\n for child in self.iter_children(Process(os.getpid())):\r\n File \"/usr/lib/python3.7/site-packages/lutris/util/process.py\", line 20, in __init__\r\n self.get_children()\r\n File \"/usr/lib/python3.7/site-packages/lutris/util/process.py\", line 67, in get_children\r\n self.children.append(Process(child_pid, parent=self))\r\n File \"/usr/lib/python3.7/site-packages/lutris/util/process.py\", line 20, in __init__\r\n self.get_children()\r\n File \"/usr/lib/python3.7/site-packages/lutris/util/process.py\", line 67, in get_children\r\n self.children.append(Process(child_pid, parent=self))\r\n File \"/usr/lib/python3.7/site-packages/lutris/util/process.py\", line 20, in __init__\r\n self.get_children()\r\n File \"/usr/lib/python3.7/site-packages/lutris/util/process.py\", line 66, in get_children\r\n for child_pid in self.get_children_pids_of_thread(tid):\r\n File \"/usr/lib/python3.7/site-packages/lutris/util/process.py\", line 57, in get_children_pids_of_thread\r\n with open(children_path) as children_file:\r\nProcessLookupError: [Errno 3] No such process: '/proc/25892/task/25892/children'\r\n\r\nLocal variables in innermost frame:\r\nself: Process 25892\r\ntid: '25892'\r\nchildren_path: '/proc/25892/task/25892/children'\n", "before_files": [{"content": "\"\"\"Class to manipulate a process\"\"\"\nimport os\nfrom lutris.util.log import logger\nfrom lutris.util.system import path_exists\n\n\nclass InvalidPid(Exception):\n \"\"\"Exception raised when an operation on a non-existent PID is called\"\"\"\n\n\nclass Process:\n \"\"\"Python abstraction a Linux process\"\"\"\n def __init__(self, pid, parent=None):\n try:\n self.pid = int(pid)\n except ValueError:\n raise InvalidPid(\"'%s' is not a valid pid\" % pid)\n self.children = []\n self.parent = None\n self.get_children()\n\n def __repr__(self):\n return \"Process {}\".format(self.pid)\n\n def __str__(self):\n return \"{} ({}:{})\".format(self.name, self.pid, self.state)\n\n def get_stat(self, parsed=True):\n stat_filename = 
\"/proc/{}/stat\".format(self.pid)\n if not path_exists(stat_filename):\n return None\n with open(stat_filename) as stat_file:\n try:\n _stat = stat_file.readline()\n except (ProcessLookupError, FileNotFoundError):\n logger.warning(\"Unable to read stat for process %s\", self.pid)\n return None\n if parsed:\n return _stat[_stat.rfind(\")\") + 1:].split()\n return _stat\n\n def get_thread_ids(self):\n \"\"\"Return a list of thread ids opened by process.\"\"\"\n basedir = \"/proc/{}/task/\".format(self.pid)\n if os.path.isdir(basedir):\n try:\n return [tid for tid in os.listdir(basedir)]\n except FileNotFoundError:\n return []\n else:\n return []\n\n def get_children_pids_of_thread(self, tid):\n \"\"\"Return pids of child processes opened by thread `tid` of process.\"\"\"\n children_path = \"/proc/{}/task/{}/children\".format(self.pid, tid)\n try:\n with open(children_path) as children_file:\n children_content = children_file.read()\n except FileNotFoundError:\n children_content = \"\"\n return children_content.strip().split()\n\n def get_children(self):\n self.children = []\n for tid in self.get_thread_ids():\n for child_pid in self.get_children_pids_of_thread(tid):\n self.children.append(Process(child_pid, parent=self))\n\n @property\n def name(self):\n \"\"\"Filename of the executable.\"\"\"\n _stat = self.get_stat(parsed=False)\n if _stat:\n return _stat[_stat.find(\"(\") + 1:_stat.rfind(\")\")]\n return None\n\n @property\n def state(self):\n \"\"\"One character from the string \"RSDZTW\" where R is running, S is\n sleeping in an interruptible wait, D is waiting in uninterruptible disk\n sleep, Z is zombie, T is traced or stopped (on a signal), and W is\n paging.\n \"\"\"\n _stat = self.get_stat()\n if _stat:\n return _stat[0]\n return None\n\n @property\n def cmdline(self):\n \"\"\"Return command line used to run the process `pid`.\"\"\"\n cmdline_path = \"/proc/{}/cmdline\".format(self.pid)\n with open(cmdline_path) as cmdline_file:\n _cmdline = cmdline_file.read().replace(\"\\x00\", \" \")\n return _cmdline\n\n @property\n def cwd(self):\n \"\"\"Return current working dir of process\"\"\"\n cwd_path = \"/proc/%d/cwd\" % int(self.pid)\n return os.readlink(cwd_path)\n", "path": "lutris/util/process.py"}], "after_files": [{"content": "\"\"\"Class to manipulate a process\"\"\"\nimport os\nfrom lutris.util.log import logger\nfrom lutris.util.system import path_exists\n\n\nclass InvalidPid(Exception):\n \"\"\"Exception raised when an operation on a non-existent PID is called\"\"\"\n\n\nclass Process:\n \"\"\"Python abstraction a Linux process\"\"\"\n def __init__(self, pid, parent=None):\n try:\n self.pid = int(pid)\n except ValueError:\n raise InvalidPid(\"'%s' is not a valid pid\" % pid)\n self.children = []\n self.parent = None\n self.get_children()\n\n def __repr__(self):\n return \"Process {}\".format(self.pid)\n\n def __str__(self):\n return \"{} ({}:{})\".format(self.name, self.pid, self.state)\n\n def get_stat(self, parsed=True):\n stat_filename = \"/proc/{}/stat\".format(self.pid)\n if not path_exists(stat_filename):\n return None\n with open(stat_filename) as stat_file:\n try:\n _stat = stat_file.readline()\n except (ProcessLookupError, FileNotFoundError):\n logger.warning(\"Unable to read stat for process %s\", self.pid)\n return None\n if parsed:\n return _stat[_stat.rfind(\")\") + 1:].split()\n return _stat\n\n def get_thread_ids(self):\n \"\"\"Return a list of thread ids opened by process.\"\"\"\n basedir = \"/proc/{}/task/\".format(self.pid)\n if os.path.isdir(basedir):\n 
try:\n return [tid for tid in os.listdir(basedir)]\n except FileNotFoundError:\n return []\n else:\n return []\n\n def get_children_pids_of_thread(self, tid):\n \"\"\"Return pids of child processes opened by thread `tid` of process.\"\"\"\n children_path = \"/proc/{}/task/{}/children\".format(self.pid, tid)\n try:\n with open(children_path) as children_file:\n children_content = children_file.read()\n except (FileNotFoundError, ProcessLookupError):\n children_content = \"\"\n return children_content.strip().split()\n\n def get_children(self):\n self.children = []\n for tid in self.get_thread_ids():\n for child_pid in self.get_children_pids_of_thread(tid):\n self.children.append(Process(child_pid, parent=self))\n\n @property\n def name(self):\n \"\"\"Filename of the executable.\"\"\"\n _stat = self.get_stat(parsed=False)\n if _stat:\n return _stat[_stat.find(\"(\") + 1:_stat.rfind(\")\")]\n return None\n\n @property\n def state(self):\n \"\"\"One character from the string \"RSDZTW\" where R is running, S is\n sleeping in an interruptible wait, D is waiting in uninterruptible disk\n sleep, Z is zombie, T is traced or stopped (on a signal), and W is\n paging.\n \"\"\"\n _stat = self.get_stat()\n if _stat:\n return _stat[0]\n return None\n\n @property\n def cmdline(self):\n \"\"\"Return command line used to run the process `pid`.\"\"\"\n cmdline_path = \"/proc/{}/cmdline\".format(self.pid)\n with open(cmdline_path) as cmdline_file:\n _cmdline = cmdline_file.read().replace(\"\\x00\", \" \")\n return _cmdline\n\n @property\n def cwd(self):\n \"\"\"Return current working dir of process\"\"\"\n cwd_path = \"/proc/%d/cwd\" % int(self.pid)\n return os.readlink(cwd_path)\n", "path": "lutris/util/process.py"}]} | 1,908 | 98 |
gh_patches_debug_3671 | rasdani/github-patches | git_diff | sanic-org__sanic-2211 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In ASGI mode, the response turns into a 500 server error.
In ASGI mode, if request middleware raises an exception and there is also an error fetching the handler from the router, the server response turns into a 500 server error.
**app.py**
```python
from sanic import Request, Sanic
from sanic.exceptions import Forbidden
from sanic.response import text
app = Sanic("My Hello, world app")
@app.get("/")
async def hello_world(request):
return text("Hello, world.")
@app.middleware
async def request_middleware(request: Request) -> None:
if "Authorization" not in request.headers:
raise Forbidden(message="Authorization header not found")
```
```bash
>>> uvicorn app:app
INFO: Started server process [227205]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic_routing/router.py", line 79, in resolve
route, param_basket = self.find_route(
File "", line 9, in find_route
sanic_routing.exceptions.NotFound: Not Found
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/router.py", line 33, in _get
return self.resolve(
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic_routing/router.py", line 96, in resolve
raise self.exception(str(e), path=path)
sanic_routing.exceptions.NotFound: Not Found
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py", line 723, in handle_request
route, handler, kwargs = self.router.get(
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/router.py", line 61, in get
return self._get(path, method, host)
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/router.py", line 39, in _get
raise NotFound("Requested URL {} not found".format(e.path))
sanic.exceptions.NotFound: Requested URL /wrong not found
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py", line 371, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py", line 59, in __call__
return await self.app(scope, receive, send)
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py", line 1276, in __call__
await asgi_app()
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/asgi.py", line 210, in __call__
await self.sanic_app.handle_request(self.request)
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py", line 791, in handle_request
await self.handle_exception(request, e)
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py", line 667, in handle_exception
response = await self._run_request_middleware(
File "/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py", line 1116, in _run_request_middleware
response = await response
File "/home/cansarigol/Documents/sanic-demo/./app.py", line 16, in request_middleware
raise Forbidden(message="Authorization header not found")
sanic.exceptions.Forbidden: Authorization header not found
INFO: 127.0.0.1:52662 - "GET /wrong HTTP/1.1" 500 Internal Server Error
```
**Expected behavior**
it should have been 403 (the same approach as WSGI).
**Environment (please complete the following information):**
- OS: ubuntu 20.04
- Version
python = "3.8"
sanic = "21.6.1"
uvicorn = "0.14.0"
--- END ISSUE ---
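For context, the 500 comes from the second exception — the `Forbidden` raised while the error path re-runs the request middleware — escaping the ASGI entry point entirely, while the non-ASGI path evidently converts the same failure into a 403 (see "Expected behavior" above). The runnable toy below uses made-up stand-in names (not Sanic internals) to show the shape of the problem and of the boundary-level catch that the golden diff later adds to `ASGIApp.__call__`.
```python
import asyncio

# Stand-in names only, to keep the example self-contained.


class Forbidden(Exception):
    status = 403


async def request_middleware():
    raise Forbidden("Authorization header not found")


async def handle_request():
    try:
        raise LookupError("Requested URL /wrong not found")  # router miss
    except LookupError:
        # The error path re-runs request middleware, which raises a second
        # exception that previously escaped all the way to the ASGI server.
        await request_middleware()


async def asgi_entry():
    try:
        await handle_request()
    except Exception as exc:
        # Boundary-level safety net: answer with the status carried by the
        # exception instead of letting the server reply 500.
        print("HTTP", getattr(exc, "status", 500))  # -> HTTP 403


asyncio.run(asgi_entry())
```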
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/asgi.py`
Content:
```
1 import warnings
2
3 from inspect import isawaitable
4 from typing import Optional
5 from urllib.parse import quote
6
7 import sanic.app # noqa
8
9 from sanic.compat import Header
10 from sanic.exceptions import ServerError
11 from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport
12 from sanic.request import Request
13 from sanic.server import ConnInfo
14 from sanic.websocket import WebSocketConnection
15
16
17 class Lifespan:
18 def __init__(self, asgi_app: "ASGIApp") -> None:
19 self.asgi_app = asgi_app
20
21 if "before_server_start" in self.asgi_app.sanic_app.listeners:
22 warnings.warn(
23 'You have set a listener for "before_server_start" '
24 "in ASGI mode. "
25 "It will be executed as early as possible, but not before "
26 "the ASGI server is started."
27 )
28 if "after_server_stop" in self.asgi_app.sanic_app.listeners:
29 warnings.warn(
30 'You have set a listener for "after_server_stop" '
31 "in ASGI mode. "
32 "It will be executed as late as possible, but not after "
33 "the ASGI server is stopped."
34 )
35
36 async def startup(self) -> None:
37 """
38 Gather the listeners to fire on server start.
39 Because we are using a third-party server and not Sanic server, we do
40 not have access to fire anything BEFORE the server starts.
41 Therefore, we fire before_server_start and after_server_start
42 in sequence since the ASGI lifespan protocol only supports a single
43 startup event.
44 """
45 self.asgi_app.sanic_app.router.finalize()
46 if self.asgi_app.sanic_app.signal_router.routes:
47 self.asgi_app.sanic_app.signal_router.finalize()
48 listeners = self.asgi_app.sanic_app.listeners.get(
49 "before_server_start", []
50 ) + self.asgi_app.sanic_app.listeners.get("after_server_start", [])
51
52 for handler in listeners:
53 response = handler(
54 self.asgi_app.sanic_app, self.asgi_app.sanic_app.loop
55 )
56 if response and isawaitable(response):
57 await response
58
59 async def shutdown(self) -> None:
60 """
61 Gather the listeners to fire on server stop.
62 Because we are using a third-party server and not Sanic server, we do
63 not have access to fire anything AFTER the server stops.
64 Therefore, we fire before_server_stop and after_server_stop
65 in sequence since the ASGI lifespan protocol only supports a single
66 shutdown event.
67 """
68 listeners = self.asgi_app.sanic_app.listeners.get(
69 "before_server_stop", []
70 ) + self.asgi_app.sanic_app.listeners.get("after_server_stop", [])
71
72 for handler in listeners:
73 response = handler(
74 self.asgi_app.sanic_app, self.asgi_app.sanic_app.loop
75 )
76 if response and isawaitable(response):
77 await response
78
79 async def __call__(
80 self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
81 ) -> None:
82 message = await receive()
83 if message["type"] == "lifespan.startup":
84 await self.startup()
85 await send({"type": "lifespan.startup.complete"})
86
87 message = await receive()
88 if message["type"] == "lifespan.shutdown":
89 await self.shutdown()
90 await send({"type": "lifespan.shutdown.complete"})
91
92
93 class ASGIApp:
94 sanic_app: "sanic.app.Sanic"
95 request: Request
96 transport: MockTransport
97 lifespan: Lifespan
98 ws: Optional[WebSocketConnection]
99
100 def __init__(self) -> None:
101 self.ws = None
102
103 @classmethod
104 async def create(
105 cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
106 ) -> "ASGIApp":
107 instance = cls()
108 instance.sanic_app = sanic_app
109 instance.transport = MockTransport(scope, receive, send)
110 instance.transport.loop = sanic_app.loop
111 setattr(instance.transport, "add_task", sanic_app.loop.create_task)
112
113 headers = Header(
114 [
115 (key.decode("latin-1"), value.decode("latin-1"))
116 for key, value in scope.get("headers", [])
117 ]
118 )
119 instance.lifespan = Lifespan(instance)
120
121 if scope["type"] == "lifespan":
122 await instance.lifespan(scope, receive, send)
123 else:
124 path = (
125 scope["path"][1:]
126 if scope["path"].startswith("/")
127 else scope["path"]
128 )
129 url = "/".join([scope.get("root_path", ""), quote(path)])
130 url_bytes = url.encode("latin-1")
131 url_bytes += b"?" + scope["query_string"]
132
133 if scope["type"] == "http":
134 version = scope["http_version"]
135 method = scope["method"]
136 elif scope["type"] == "websocket":
137 version = "1.1"
138 method = "GET"
139
140 instance.ws = instance.transport.create_websocket_connection(
141 send, receive
142 )
143 else:
144 raise ServerError("Received unknown ASGI scope")
145
146 request_class = sanic_app.request_class or Request
147 instance.request = request_class(
148 url_bytes,
149 headers,
150 version,
151 method,
152 instance.transport,
153 sanic_app,
154 )
155 instance.request.stream = instance
156 instance.request_body = True
157 instance.request.conn_info = ConnInfo(instance.transport)
158
159 return instance
160
161 async def read(self) -> Optional[bytes]:
162 """
163 Read and stream the body in chunks from an incoming ASGI message.
164 """
165 message = await self.transport.receive()
166 body = message.get("body", b"")
167 if not message.get("more_body", False):
168 self.request_body = False
169 if not body:
170 return None
171 return body
172
173 async def __aiter__(self):
174 while self.request_body:
175 data = await self.read()
176 if data:
177 yield data
178
179 def respond(self, response):
180 response.stream, self.response = self, response
181 return response
182
183 async def send(self, data, end_stream):
184 if self.response:
185 response, self.response = self.response, None
186 await self.transport.send(
187 {
188 "type": "http.response.start",
189 "status": response.status,
190 "headers": response.processed_headers,
191 }
192 )
193 response_body = getattr(response, "body", None)
194 if response_body:
195 data = response_body + data if data else response_body
196 await self.transport.send(
197 {
198 "type": "http.response.body",
199 "body": data.encode() if hasattr(data, "encode") else data,
200 "more_body": not end_stream,
201 }
202 )
203
204 _asgi_single_callable = True # We conform to ASGI 3.0 single-callable
205
206 async def __call__(self) -> None:
207 """
208 Handle the incoming request.
209 """
210 await self.sanic_app.handle_request(self.request)
211
```
Path: `sanic/__version__.py`
Content:
```
1 __version__ = "21.6.1"
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/__version__.py b/sanic/__version__.py
--- a/sanic/__version__.py
+++ b/sanic/__version__.py
@@ -1 +1 @@
-__version__ = "21.6.1"
+__version__ = "21.6.2"
diff --git a/sanic/asgi.py b/sanic/asgi.py
--- a/sanic/asgi.py
+++ b/sanic/asgi.py
@@ -207,4 +207,7 @@
"""
Handle the incoming request.
"""
- await self.sanic_app.handle_request(self.request)
+ try:
+ await self.sanic_app.handle_request(self.request)
+ except Exception as e:
+ await self.sanic_app.handle_exception(self.request, e)
| {"golden_diff": "diff --git a/sanic/__version__.py b/sanic/__version__.py\n--- a/sanic/__version__.py\n+++ b/sanic/__version__.py\n@@ -1 +1 @@\n-__version__ = \"21.6.1\"\n+__version__ = \"21.6.2\"\ndiff --git a/sanic/asgi.py b/sanic/asgi.py\n--- a/sanic/asgi.py\n+++ b/sanic/asgi.py\n@@ -207,4 +207,7 @@\n \"\"\"\n Handle the incoming request.\n \"\"\"\n- await self.sanic_app.handle_request(self.request)\n+ try:\n+ await self.sanic_app.handle_request(self.request)\n+ except Exception as e:\n+ await self.sanic_app.handle_exception(self.request, e)\n", "issue": "In ASGI mod, the response turns into 500 server error.\nIn ASGI mod, if request middleware raises an exception and meanwhile there is an error in fetching handler from router part as well, server response turns into 500 server error. \r\n\r\n**app.py**\r\n```python\r\nfrom sanic import Request, Sanic\r\nfrom sanic.exceptions import Forbidden\r\nfrom sanic.response import text\r\n\r\napp = Sanic(\"My Hello, world app\")\r\n\r\n\r\[email protected](\"/\")\r\nasync def hello_world(request):\r\n return text(\"Hello, world.\")\r\n\r\n\r\[email protected]\r\nasync def request_middleware(request: Request) -> None:\r\n if \"Authorization\" not in request.headers:\r\n raise Forbidden(message=\"Authorization header not found\")\r\n```\r\n```bash\r\n>>> uvicorn app:app\r\nINFO: Started server process [227205]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\nINFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\r\nERROR: Exception in ASGI application\r\nTraceback (most recent call last):\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic_routing/router.py\", line 79, in resolve\r\n route, param_basket = self.find_route(\r\n File \"\", line 9, in find_route\r\nsanic_routing.exceptions.NotFound: Not Found\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/router.py\", line 33, in _get\r\n return self.resolve(\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic_routing/router.py\", line 96, in resolve\r\n raise self.exception(str(e), path=path)\r\nsanic_routing.exceptions.NotFound: Not Found\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py\", line 723, in handle_request\r\n route, handler, kwargs = self.router.get(\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/router.py\", line 61, in get\r\n return self._get(path, method, host)\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/router.py\", line 39, in _get\r\n raise NotFound(\"Requested URL {} not found\".format(e.path))\r\nsanic.exceptions.NotFound: Requested URL /wrong not found\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 371, in run_asgi\r\n result = await app(self.scope, self.receive, self.send)\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py\", line 59, in __call__\r\n return await self.app(scope, receive, 
send)\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py\", line 1276, in __call__\r\n await asgi_app()\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/asgi.py\", line 210, in __call__\r\n await self.sanic_app.handle_request(self.request)\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py\", line 791, in handle_request\r\n await self.handle_exception(request, e)\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py\", line 667, in handle_exception\r\n response = await self._run_request_middleware(\r\n File \"/home/cansarigol/Documents/sanic-demo/venv/lib/python3.8/site-packages/sanic/app.py\", line 1116, in _run_request_middleware\r\n response = await response\r\n File \"/home/cansarigol/Documents/sanic-demo/./app.py\", line 16, in request_middleware\r\n raise Forbidden(message=\"Authorization header not found\")\r\nsanic.exceptions.Forbidden: Authorization header not found\r\nINFO: 127.0.0.1:52662 - \"GET /wrong HTTP/1.1\" 500 Internal Server Error\r\n```\r\n\r\n**Expected behavior**\r\nit should have been 403 (the same approach as WSGI).\r\n\r\n**Environment (please complete the following information):**\r\n - OS: ubuntu 20.04\r\n - Version\r\n python = \"3.8\"\r\n sanic = \"21.6.1\"\r\n uvicorn = \"0.14.0\"\r\n\n", "before_files": [{"content": "import warnings\n\nfrom inspect import isawaitable\nfrom typing import Optional\nfrom urllib.parse import quote\n\nimport sanic.app # noqa\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import ServerError\nfrom sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\nfrom sanic.request import Request\nfrom sanic.server import ConnInfo\nfrom sanic.websocket import WebSocketConnection\n\n\nclass Lifespan:\n def __init__(self, asgi_app: \"ASGIApp\") -> None:\n self.asgi_app = asgi_app\n\n if \"before_server_start\" in self.asgi_app.sanic_app.listeners:\n warnings.warn(\n 'You have set a listener for \"before_server_start\" '\n \"in ASGI mode. \"\n \"It will be executed as early as possible, but not before \"\n \"the ASGI server is started.\"\n )\n if \"after_server_stop\" in self.asgi_app.sanic_app.listeners:\n warnings.warn(\n 'You have set a listener for \"after_server_stop\" '\n \"in ASGI mode. 
\"\n \"It will be executed as late as possible, but not after \"\n \"the ASGI server is stopped.\"\n )\n\n async def startup(self) -> None:\n \"\"\"\n Gather the listeners to fire on server start.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything BEFORE the server starts.\n Therefore, we fire before_server_start and after_server_start\n in sequence since the ASGI lifespan protocol only supports a single\n startup event.\n \"\"\"\n self.asgi_app.sanic_app.router.finalize()\n if self.asgi_app.sanic_app.signal_router.routes:\n self.asgi_app.sanic_app.signal_router.finalize()\n listeners = self.asgi_app.sanic_app.listeners.get(\n \"before_server_start\", []\n ) + self.asgi_app.sanic_app.listeners.get(\"after_server_start\", [])\n\n for handler in listeners:\n response = handler(\n self.asgi_app.sanic_app, self.asgi_app.sanic_app.loop\n )\n if response and isawaitable(response):\n await response\n\n async def shutdown(self) -> None:\n \"\"\"\n Gather the listeners to fire on server stop.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything AFTER the server stops.\n Therefore, we fire before_server_stop and after_server_stop\n in sequence since the ASGI lifespan protocol only supports a single\n shutdown event.\n \"\"\"\n listeners = self.asgi_app.sanic_app.listeners.get(\n \"before_server_stop\", []\n ) + self.asgi_app.sanic_app.listeners.get(\"after_server_stop\", [])\n\n for handler in listeners:\n response = handler(\n self.asgi_app.sanic_app, self.asgi_app.sanic_app.loop\n )\n if response and isawaitable(response):\n await response\n\n async def __call__(\n self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> None:\n message = await receive()\n if message[\"type\"] == \"lifespan.startup\":\n await self.startup()\n await send({\"type\": \"lifespan.startup.complete\"})\n\n message = await receive()\n if message[\"type\"] == \"lifespan.shutdown\":\n await self.shutdown()\n await send({\"type\": \"lifespan.shutdown.complete\"})\n\n\nclass ASGIApp:\n sanic_app: \"sanic.app.Sanic\"\n request: Request\n transport: MockTransport\n lifespan: Lifespan\n ws: Optional[WebSocketConnection]\n\n def __init__(self) -> None:\n self.ws = None\n\n @classmethod\n async def create(\n cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> \"ASGIApp\":\n instance = cls()\n instance.sanic_app = sanic_app\n instance.transport = MockTransport(scope, receive, send)\n instance.transport.loop = sanic_app.loop\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n\n headers = Header(\n [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in scope.get(\"headers\", [])\n ]\n )\n instance.lifespan = Lifespan(instance)\n\n if scope[\"type\"] == \"lifespan\":\n await instance.lifespan(scope, receive, send)\n else:\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n else scope[\"path\"]\n )\n url = \"/\".join([scope.get(\"root_path\", \"\"), quote(path)])\n url_bytes = url.encode(\"latin-1\")\n url_bytes += b\"?\" + scope[\"query_string\"]\n\n if scope[\"type\"] == \"http\":\n version = scope[\"http_version\"]\n method = scope[\"method\"]\n elif scope[\"type\"] == \"websocket\":\n version = \"1.1\"\n method = \"GET\"\n\n instance.ws = instance.transport.create_websocket_connection(\n send, receive\n )\n else:\n raise ServerError(\"Received unknown ASGI scope\")\n\n request_class = sanic_app.request_class or Request\n 
instance.request = request_class(\n url_bytes,\n headers,\n version,\n method,\n instance.transport,\n sanic_app,\n )\n instance.request.stream = instance\n instance.request_body = True\n instance.request.conn_info = ConnInfo(instance.transport)\n\n return instance\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read and stream the body in chunks from an incoming ASGI message.\n \"\"\"\n message = await self.transport.receive()\n body = message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n self.request_body = False\n if not body:\n return None\n return body\n\n async def __aiter__(self):\n while self.request_body:\n data = await self.read()\n if data:\n yield data\n\n def respond(self, response):\n response.stream, self.response = self, response\n return response\n\n async def send(self, data, end_stream):\n if self.response:\n response, self.response = self.response, None\n await self.transport.send(\n {\n \"type\": \"http.response.start\",\n \"status\": response.status,\n \"headers\": response.processed_headers,\n }\n )\n response_body = getattr(response, \"body\", None)\n if response_body:\n data = response_body + data if data else response_body\n await self.transport.send(\n {\n \"type\": \"http.response.body\",\n \"body\": data.encode() if hasattr(data, \"encode\") else data,\n \"more_body\": not end_stream,\n }\n )\n\n _asgi_single_callable = True # We conform to ASGI 3.0 single-callable\n\n async def __call__(self) -> None:\n \"\"\"\n Handle the incoming request.\n \"\"\"\n await self.sanic_app.handle_request(self.request)\n", "path": "sanic/asgi.py"}, {"content": "__version__ = \"21.6.1\"\n", "path": "sanic/__version__.py"}], "after_files": [{"content": "import warnings\n\nfrom inspect import isawaitable\nfrom typing import Optional\nfrom urllib.parse import quote\n\nimport sanic.app # noqa\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import ServerError\nfrom sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\nfrom sanic.request import Request\nfrom sanic.server import ConnInfo\nfrom sanic.websocket import WebSocketConnection\n\n\nclass Lifespan:\n def __init__(self, asgi_app: \"ASGIApp\") -> None:\n self.asgi_app = asgi_app\n\n if \"before_server_start\" in self.asgi_app.sanic_app.listeners:\n warnings.warn(\n 'You have set a listener for \"before_server_start\" '\n \"in ASGI mode. \"\n \"It will be executed as early as possible, but not before \"\n \"the ASGI server is started.\"\n )\n if \"after_server_stop\" in self.asgi_app.sanic_app.listeners:\n warnings.warn(\n 'You have set a listener for \"after_server_stop\" '\n \"in ASGI mode. 
\"\n \"It will be executed as late as possible, but not after \"\n \"the ASGI server is stopped.\"\n )\n\n async def startup(self) -> None:\n \"\"\"\n Gather the listeners to fire on server start.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything BEFORE the server starts.\n Therefore, we fire before_server_start and after_server_start\n in sequence since the ASGI lifespan protocol only supports a single\n startup event.\n \"\"\"\n self.asgi_app.sanic_app.router.finalize()\n if self.asgi_app.sanic_app.signal_router.routes:\n self.asgi_app.sanic_app.signal_router.finalize()\n listeners = self.asgi_app.sanic_app.listeners.get(\n \"before_server_start\", []\n ) + self.asgi_app.sanic_app.listeners.get(\"after_server_start\", [])\n\n for handler in listeners:\n response = handler(\n self.asgi_app.sanic_app, self.asgi_app.sanic_app.loop\n )\n if response and isawaitable(response):\n await response\n\n async def shutdown(self) -> None:\n \"\"\"\n Gather the listeners to fire on server stop.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything AFTER the server stops.\n Therefore, we fire before_server_stop and after_server_stop\n in sequence since the ASGI lifespan protocol only supports a single\n shutdown event.\n \"\"\"\n listeners = self.asgi_app.sanic_app.listeners.get(\n \"before_server_stop\", []\n ) + self.asgi_app.sanic_app.listeners.get(\"after_server_stop\", [])\n\n for handler in listeners:\n response = handler(\n self.asgi_app.sanic_app, self.asgi_app.sanic_app.loop\n )\n if response and isawaitable(response):\n await response\n\n async def __call__(\n self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> None:\n message = await receive()\n if message[\"type\"] == \"lifespan.startup\":\n await self.startup()\n await send({\"type\": \"lifespan.startup.complete\"})\n\n message = await receive()\n if message[\"type\"] == \"lifespan.shutdown\":\n await self.shutdown()\n await send({\"type\": \"lifespan.shutdown.complete\"})\n\n\nclass ASGIApp:\n sanic_app: \"sanic.app.Sanic\"\n request: Request\n transport: MockTransport\n lifespan: Lifespan\n ws: Optional[WebSocketConnection]\n\n def __init__(self) -> None:\n self.ws = None\n\n @classmethod\n async def create(\n cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> \"ASGIApp\":\n instance = cls()\n instance.sanic_app = sanic_app\n instance.transport = MockTransport(scope, receive, send)\n instance.transport.loop = sanic_app.loop\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n\n headers = Header(\n [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in scope.get(\"headers\", [])\n ]\n )\n instance.lifespan = Lifespan(instance)\n\n if scope[\"type\"] == \"lifespan\":\n await instance.lifespan(scope, receive, send)\n else:\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n else scope[\"path\"]\n )\n url = \"/\".join([scope.get(\"root_path\", \"\"), quote(path)])\n url_bytes = url.encode(\"latin-1\")\n url_bytes += b\"?\" + scope[\"query_string\"]\n\n if scope[\"type\"] == \"http\":\n version = scope[\"http_version\"]\n method = scope[\"method\"]\n elif scope[\"type\"] == \"websocket\":\n version = \"1.1\"\n method = \"GET\"\n\n instance.ws = instance.transport.create_websocket_connection(\n send, receive\n )\n else:\n raise ServerError(\"Received unknown ASGI scope\")\n\n request_class = sanic_app.request_class or Request\n 
instance.request = request_class(\n url_bytes,\n headers,\n version,\n method,\n instance.transport,\n sanic_app,\n )\n instance.request.stream = instance\n instance.request_body = True\n instance.request.conn_info = ConnInfo(instance.transport)\n\n return instance\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read and stream the body in chunks from an incoming ASGI message.\n \"\"\"\n message = await self.transport.receive()\n body = message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n self.request_body = False\n if not body:\n return None\n return body\n\n async def __aiter__(self):\n while self.request_body:\n data = await self.read()\n if data:\n yield data\n\n def respond(self, response):\n response.stream, self.response = self, response\n return response\n\n async def send(self, data, end_stream):\n if self.response:\n response, self.response = self.response, None\n await self.transport.send(\n {\n \"type\": \"http.response.start\",\n \"status\": response.status,\n \"headers\": response.processed_headers,\n }\n )\n response_body = getattr(response, \"body\", None)\n if response_body:\n data = response_body + data if data else response_body\n await self.transport.send(\n {\n \"type\": \"http.response.body\",\n \"body\": data.encode() if hasattr(data, \"encode\") else data,\n \"more_body\": not end_stream,\n }\n )\n\n _asgi_single_callable = True # We conform to ASGI 3.0 single-callable\n\n async def __call__(self) -> None:\n \"\"\"\n Handle the incoming request.\n \"\"\"\n try:\n await self.sanic_app.handle_request(self.request)\n except Exception as e:\n await self.sanic_app.handle_exception(self.request, e)\n", "path": "sanic/asgi.py"}, {"content": "__version__ = \"21.6.2\"\n", "path": "sanic/__version__.py"}]} | 3,507 | 173 |
gh_patches_debug_24181 | rasdani/github-patches | git_diff | ansible__awx-13528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception handling for Conjur Secrets Manager Lookup not working properly
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
When using the Conjur Secrets Manager Lookup credential type against Conjur Cloud, the exception handling does not automatically fall back to the correct URI endpoint. Any attempt currently fails with HTTP 404.
### AWX version
0.1.dev32763+g3d73b80
### Select the relevant components
- [X] UI
- [ ] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
docker development environment
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
1. Select "CyberArk Conjur Secrets Manager Lookup" as the credential type for a new credential object in AWX.
2. Configure for a Conjur Cloud tenant.
3. Run "Test" and a failure with "HTTP 404" is returned.
### Expected results
The test should pass.
### Actual results
The test fails with an HTTP 404 error code.
### Additional information
This fix is being implemented in a PR within moments.
--- END ISSUE ---
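For context, against Conjur Cloud the self-hosted-style URL connects successfully and simply answers 404, so the existing `except requests.exceptions.ConnectionError` fallback never runs; the fallback has to key off the HTTP status instead. Below is a small sketch of that pattern (the helper name, host, and secret path are illustrative, not the plugin's code), matching the `raise_for_status()` / `HTTPError` approach in the golden diff below.
```python
import requests


def get_with_fallback(primary_url, fallback_url, **kwargs):
    """Try the self-hosted-style path first; if the host answers with an
    HTTP error status (e.g. Conjur Cloud returning 404), retry the /api/
    flavoured path instead."""
    try:
        resp = requests.get(primary_url, timeout=30, **kwargs)
        resp.raise_for_status()  # turn a 404 answer into HTTPError
    except requests.exceptions.HTTPError:
        resp = requests.get(fallback_url, timeout=30, **kwargs)
    resp.raise_for_status()
    return resp


# Example call (hypothetical host and secret path):
# get_with_fallback(
#     "https://conjur.example/secrets/myaccount/variable/some%2Fidentifier",
#     "https://conjur.example/api/secrets/myaccount/variable/some%2Fidentifier",
#     headers={"Authorization": 'Token token="..."'},
# )
```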
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx/main/credential_plugins/conjur.py`
Content:
```
1 from .plugin import CredentialPlugin, CertFiles, raise_for_status
2
3 from urllib.parse import urljoin, quote
4
5 from django.utils.translation import gettext_lazy as _
6 import requests
7
8
9 conjur_inputs = {
10 'fields': [
11 {
12 'id': 'url',
13 'label': _('Conjur URL'),
14 'type': 'string',
15 'format': 'url',
16 },
17 {
18 'id': 'api_key',
19 'label': _('API Key'),
20 'type': 'string',
21 'secret': True,
22 },
23 {
24 'id': 'account',
25 'label': _('Account'),
26 'type': 'string',
27 },
28 {
29 'id': 'username',
30 'label': _('Username'),
31 'type': 'string',
32 },
33 {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},
34 ],
35 'metadata': [
36 {
37 'id': 'secret_path',
38 'label': _('Secret Identifier'),
39 'type': 'string',
40 'help_text': _('The identifier for the secret e.g., /some/identifier'),
41 },
42 {
43 'id': 'secret_version',
44 'label': _('Secret Version'),
45 'type': 'string',
46 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
47 },
48 ],
49 'required': ['url', 'api_key', 'account', 'username'],
50 }
51
52
53 def conjur_backend(**kwargs):
54 url = kwargs['url']
55 api_key = kwargs['api_key']
56 account = quote(kwargs['account'], safe='')
57 username = quote(kwargs['username'], safe='')
58 secret_path = quote(kwargs['secret_path'], safe='')
59 version = kwargs.get('secret_version')
60 cacert = kwargs.get('cacert', None)
61
62 auth_kwargs = {
63 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},
64 'data': api_key,
65 'allow_redirects': False,
66 }
67
68 with CertFiles(cacert) as cert:
69 # https://www.conjur.org/api.html#authentication-authenticate-post
70 auth_kwargs['verify'] = cert
71 try:
72 resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
73 except requests.exceptions.ConnectionError:
74 resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
75 raise_for_status(resp)
76 token = resp.content.decode('utf-8')
77
78 lookup_kwargs = {
79 'headers': {'Authorization': 'Token token="{}"'.format(token)},
80 'allow_redirects': False,
81 }
82
83 # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
84 path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
85 path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
86 if version:
87 ver = "version={}".format(version)
88 path = '?'.join([path, ver])
89 path_conjurcloud = '?'.join([path_conjurcloud, ver])
90
91 with CertFiles(cacert) as cert:
92 lookup_kwargs['verify'] = cert
93 try:
94 resp = requests.get(path, timeout=30, **lookup_kwargs)
95 except requests.exceptions.ConnectionError:
96 resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)
97 raise_for_status(resp)
98 return resp.text
99
100
101 conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py
--- a/awx/main/credential_plugins/conjur.py
+++ b/awx/main/credential_plugins/conjur.py
@@ -70,7 +70,8 @@
auth_kwargs['verify'] = cert
try:
resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
- except requests.exceptions.ConnectionError:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError:
resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
raise_for_status(resp)
token = resp.content.decode('utf-8')
@@ -92,7 +93,8 @@
lookup_kwargs['verify'] = cert
try:
resp = requests.get(path, timeout=30, **lookup_kwargs)
- except requests.exceptions.ConnectionError:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError:
resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)
raise_for_status(resp)
return resp.text
| {"golden_diff": "diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py\n--- a/awx/main/credential_plugins/conjur.py\n+++ b/awx/main/credential_plugins/conjur.py\n@@ -70,7 +70,8 @@\n auth_kwargs['verify'] = cert\n try:\n resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n- except requests.exceptions.ConnectionError:\n+ resp.raise_for_status()\n+ except requests.exceptions.HTTPError:\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n@@ -92,7 +93,8 @@\n lookup_kwargs['verify'] = cert\n try:\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n- except requests.exceptions.ConnectionError:\n+ resp.raise_for_status()\n+ except requests.exceptions.HTTPError:\n resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n", "issue": "Exception handling for Conjur Secrets Manager Lookup not working properly\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n\n### Bug Summary\n\nWhen using the Conjur Secrets Manager Lookup credential type against Conjur Cloud, exception handling does not properly switch to the proper URI endpoint automatically. Any attempts currently fail with HTTP 404.\n\n### AWX version\n\n0.1.dev32763+g3d73b80\n\n### Select the relevant components\n\n- [X] UI\n- [ ] API\n- [ ] Docs\n- [ ] Collection\n- [ ] CLI\n- [ ] Other\n\n### Installation method\n\ndocker development environment\n\n### Modifications\n\nno\n\n### Ansible version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\n1. Select \"CyberArk Conjur Secrets Manager Lookup\" as the credential type for a new credential object in AWX.\r\n2. Configure for a Conjur Cloud tenant.\r\n3. 
Run \"Test\" and a failure with \"HTTP 404\" is returned.\n\n### Expected results\n\nThe test should pass.\n\n### Actual results\n\nThe test fails with an HTTP 404 error code.\n\n### Additional information\n\nThis fix is being implemented in a PR within moments.\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n try:\n resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n except requests.exceptions.ConnectionError:\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n\n lookup_kwargs = {\n 'headers': {'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n path_conjurcloud = '?'.join([path_conjurcloud, ver])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n try:\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n except requests.exceptions.ConnectionError:\n resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}], "after_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': 
[\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n try:\n resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n resp.raise_for_status()\n except requests.exceptions.HTTPError:\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n\n lookup_kwargs = {\n 'headers': {'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n path_conjurcloud = '?'.join([path_conjurcloud, ver])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n try:\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n resp.raise_for_status()\n except requests.exceptions.HTTPError:\n resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}]} | 1,621 | 268 |
gh_patches_debug_5935 | rasdani/github-patches | git_diff | Flexget__Flexget-2206 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Letterboxd plugin does not paginate correctly
### Expected behaviour:
When using a letterboxd list as input, lists with more than 100 entries should produce more than 100 entries in flexget.
### Actual behaviour:
Letterboxd input is only able to generate <=100 entries.
### Steps to reproduce:
- Step 1: Configure a letterboxd input for a list that contains > 100 entries
- Step 2: Run task and note that only 100 entries are generated.
### Additional information:
I see the problem:
https://github.com/Flexget/Flexget/blob/83bc8dc86fd4760c4f059353975a24e04e681888/flexget/plugins/input/letterboxd.py#L137
The actual markup on page is (as of this writing):
```html
<div class="paginate-nextprev">
<a class="next" href="/usrid/list/listname/page/pagenum/">Next</a>
</div>
```
So the query should actually be '.paginate-nextprev .next', not '.paginate-next'. I'm assuming they changed their site markup.
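For illustration, here is a minimal, self-contained sketch of the two lookups against the markup above (the HTML snippet and the `.paginate-nextprev .next` selector come from this issue; the parser choice and the sample `href` are assumptions, not the final patch):

```python
from bs4 import BeautifulSoup  # the plugin's get_soup() helper returns a BeautifulSoup object

html = '''
<div class="paginate-nextprev">
    <a class="next" href="/usrid/list/listname/page/2/">Next</a>
</div>
'''
soup = BeautifulSoup(html, 'html.parser')

# Current lookup: no element carries the class "paginate-next", so this returns
# None and pagination stops after the first page of results.
old_next = soup.find(class_='paginate-next')

# Adjusted lookup: a CSS selector that matches the nested <a class="next"> element.
new_next = soup.select_one('.paginate-nextprev .next')

print(old_next)                                    # None
print(new_next.get('href') if new_next else None)  # /usrid/list/listname/page/2/
```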
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/input/letterboxd.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5
6 from flexget import plugin
7 from flexget.entry import Entry
8 from flexget.event import event
9 from flexget.utils.cached_input import cached
10 from flexget.utils.requests import RequestException, Session, TimedLimiter
11 from flexget.utils.soup import get_soup
12
13 log = logging.getLogger('letterboxd')
14
15 requests = Session(max_retries=5)
16 requests.add_domain_limiter(TimedLimiter('letterboxd.com', '1 seconds'))
17 base_url = 'http://letterboxd.com'
18
19 SLUGS = {
20 'default': {
21 'p_slug': '/%(user)s/list/%(list)s/',
22 'f_slug': 'data-film-slug'},
23 'diary': {
24 'p_slug': '/%(user)s/films/diary/',
25 'f_slug': 'data-film-slug'},
26 'likes': {
27 'p_slug': '/%(user)s/likes/films/',
28 'f_slug': 'data-film-link'},
29 'rated': {
30 'p_slug': '/%(user)s/films/ratings/',
31 'f_slug': 'data-film-slug'},
32 'watched': {
33 'p_slug': '/%(user)s/films/',
34 'f_slug': 'data-film-slug'},
35 'watchlist': {
36 'p_slug': '/%(user)s/watchlist/',
37 'f_slug': 'data-film-slug'}
38 }
39
40 SORT_BY = {
41 'default': '',
42 'added': 'by/added/',
43 'length-ascending': 'by/shortest/',
44 'length-descending': 'by/longest/',
45 'name': 'by/name/',
46 'popularity': 'by/popular/',
47 'rating-ascending': 'by/rating-lowest/',
48 'rating-descending': 'by/rating/',
49 'release-ascending': 'by/release-earliest/',
50 'release-descending': 'by/release/'
51 }
52
53
54 class Letterboxd(object):
55 schema = {
56 'type': 'object',
57 'properties': {
58 'username': {'type': 'string'},
59 'list': {'type': 'string'},
60 'sort_by': {
61 'type': 'string',
62 'enum': list(SORT_BY.keys()),
63 'default': 'default'},
64 'max_results': {'type': 'integer'}
65 },
66 'required': ['username', 'list'],
67 'additionalProperties': False
68 }
69
70 def build_config(self, config):
71 config['list'] = config['list'].lower().replace(' ', '-')
72 list_key = config['list']
73 if list_key not in list(SLUGS.keys()):
74 list_key = 'default'
75 config['p_slug'] = SLUGS[list_key]['p_slug'] % {'user': config['username'], 'list': config['list']}
76 config['f_slug'] = SLUGS[list_key]['f_slug']
77 config['sort_by'] = SORT_BY[config['sort_by']]
78
79 return config
80
81 def tmdb_lookup(self, search):
82 tmdb = plugin.get_plugin_by_name('api_tmdb').instance.lookup(tmdb_id=search)
83 result = {
84 'title': '%s (%s)' % (tmdb.name, tmdb.year),
85 'imdb_id': tmdb.imdb_id,
86 'tmdb_id': tmdb.id,
87 'movie_name': tmdb.name,
88 'movie_year': tmdb.year
89 }
90
91 return result
92
93 def parse_film(self, film, config):
94 url = base_url + film.get(config['f_slug'])
95 soup = get_soup(requests.get(url).content)
96 result = self.tmdb_lookup(soup.find(attrs={'data-tmdb-id': True}).get('data-tmdb-id'))
97
98 entry = Entry(result)
99 entry['url'] = url
100 entry['letterboxd_list'] = '%s (%s)' % (config['list'], config['username'])
101 try:
102 entry['letterboxd_score'] = float(soup.find(itemprop='average').get('content'))
103 except AttributeError:
104 pass
105 if config['list'] == 'diary':
106 entry['letterboxd_uscore'] = int(film.find_next(attrs={'data-rating': True}).get('data-rating'))
107 elif config['list'] == 'rated':
108 entry['letterboxd_uscore'] = int(film.find_next(itemprop='rating').get('content'))
109
110 return entry
111
112 @cached('letterboxd', persist='2 hours')
113 def on_task_input(self, task, config=None):
114 config = self.build_config(config)
115 url = base_url + config['p_slug'] + config['sort_by']
116 max_results = config.get('max_results', 1)
117 rcount = 0
118 next_page = ''
119
120 log.verbose('Looking for films in Letterboxd list: %s' % url)
121
122 entries = []
123 while next_page is not None and rcount < max_results:
124 try:
125 page = requests.get(url).content
126 except RequestException as e:
127 raise plugin.PluginError('Error retrieving list from Letterboxd: %s' % e)
128 soup = get_soup(page)
129
130 for film in soup.find_all(attrs={config['f_slug']: True}):
131 if rcount < max_results:
132 entry = self.parse_film(film, config)
133 entries.append(entry)
134 if 'max_results' in config:
135 rcount += 1
136
137 next_page = soup.find(class_='paginate-next')
138 if next_page is not None:
139 next_page = next_page.get('href')
140 if next_page is not None:
141 url = base_url + next_page
142
143 return entries
144
145
146 @event('plugin.register')
147 def register_plugin():
148 plugin.register(Letterboxd, 'letterboxd', api_ver=2)
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/input/letterboxd.py b/flexget/plugins/input/letterboxd.py
--- a/flexget/plugins/input/letterboxd.py
+++ b/flexget/plugins/input/letterboxd.py
@@ -134,7 +134,7 @@
if 'max_results' in config:
rcount += 1
- next_page = soup.find(class_='paginate-next')
+ next_page = soup.select_one('.paginate-nextprev .next')
if next_page is not None:
next_page = next_page.get('href')
if next_page is not None:
| {"golden_diff": "diff --git a/flexget/plugins/input/letterboxd.py b/flexget/plugins/input/letterboxd.py\n--- a/flexget/plugins/input/letterboxd.py\n+++ b/flexget/plugins/input/letterboxd.py\n@@ -134,7 +134,7 @@\n if 'max_results' in config:\n rcount += 1\n \n- next_page = soup.find(class_='paginate-next')\n+ next_page = soup.select_one('.paginate-nextprev .next')\n if next_page is not None:\n next_page = next_page.get('href')\n if next_page is not None:\n", "issue": "Letterboxd plugin does not paginate correctly\n### Expected behaviour:\r\n\r\nWhen using a letterboxd list as input, lists with more than 100 entries should produce more than 100 entries in flexget.\r\n\r\n### Actual behaviour:\r\n\r\nLetterboxd input is only able to generate <=100 entries.\r\n\r\n### Steps to reproduce:\r\n- Step 1: Configure a letterboxd input for a list that contains > 100 entries\r\n- Step 2: Run task and note that only 100 entries are generated.\r\n\r\n### Additional information:\r\n\r\nI see the problem:\r\n\r\nhttps://github.com/Flexget/Flexget/blob/83bc8dc86fd4760c4f059353975a24e04e681888/flexget/plugins/input/letterboxd.py#L137\r\n\r\nThe actual markup on page is (as of this writing):\r\n\r\n```html\r\n<div class=\"paginate-nextprev\">\r\n <a class=\"next\" href=\"/usrid/list/listname/page/pagenum/\">Next</a>\r\n</div>\r\n```\r\n\r\nSo the query should actually be '.paginate-nextprev .next' not '.paginate-next'. I'm assuming they changed their site. \n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.cached_input import cached\nfrom flexget.utils.requests import RequestException, Session, TimedLimiter\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('letterboxd')\n\nrequests = Session(max_retries=5)\nrequests.add_domain_limiter(TimedLimiter('letterboxd.com', '1 seconds'))\nbase_url = 'http://letterboxd.com'\n\nSLUGS = {\n 'default': {\n 'p_slug': '/%(user)s/list/%(list)s/',\n 'f_slug': 'data-film-slug'},\n 'diary': {\n 'p_slug': '/%(user)s/films/diary/',\n 'f_slug': 'data-film-slug'},\n 'likes': {\n 'p_slug': '/%(user)s/likes/films/',\n 'f_slug': 'data-film-link'},\n 'rated': {\n 'p_slug': '/%(user)s/films/ratings/',\n 'f_slug': 'data-film-slug'},\n 'watched': {\n 'p_slug': '/%(user)s/films/',\n 'f_slug': 'data-film-slug'},\n 'watchlist': {\n 'p_slug': '/%(user)s/watchlist/',\n 'f_slug': 'data-film-slug'}\n}\n\nSORT_BY = {\n 'default': '',\n 'added': 'by/added/',\n 'length-ascending': 'by/shortest/',\n 'length-descending': 'by/longest/',\n 'name': 'by/name/',\n 'popularity': 'by/popular/',\n 'rating-ascending': 'by/rating-lowest/',\n 'rating-descending': 'by/rating/',\n 'release-ascending': 'by/release-earliest/',\n 'release-descending': 'by/release/'\n}\n\n\nclass Letterboxd(object):\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'list': {'type': 'string'},\n 'sort_by': {\n 'type': 'string',\n 'enum': list(SORT_BY.keys()),\n 'default': 'default'},\n 'max_results': {'type': 'integer'}\n },\n 'required': ['username', 'list'],\n 'additionalProperties': False\n }\n\n def build_config(self, config):\n config['list'] = config['list'].lower().replace(' ', '-')\n list_key = config['list']\n if list_key not in list(SLUGS.keys()):\n list_key = 'default'\n config['p_slug'] = 
SLUGS[list_key]['p_slug'] % {'user': config['username'], 'list': config['list']}\n config['f_slug'] = SLUGS[list_key]['f_slug']\n config['sort_by'] = SORT_BY[config['sort_by']]\n\n return config\n\n def tmdb_lookup(self, search):\n tmdb = plugin.get_plugin_by_name('api_tmdb').instance.lookup(tmdb_id=search)\n result = {\n 'title': '%s (%s)' % (tmdb.name, tmdb.year),\n 'imdb_id': tmdb.imdb_id,\n 'tmdb_id': tmdb.id,\n 'movie_name': tmdb.name,\n 'movie_year': tmdb.year\n }\n\n return result\n\n def parse_film(self, film, config):\n url = base_url + film.get(config['f_slug'])\n soup = get_soup(requests.get(url).content)\n result = self.tmdb_lookup(soup.find(attrs={'data-tmdb-id': True}).get('data-tmdb-id'))\n\n entry = Entry(result)\n entry['url'] = url\n entry['letterboxd_list'] = '%s (%s)' % (config['list'], config['username'])\n try:\n entry['letterboxd_score'] = float(soup.find(itemprop='average').get('content'))\n except AttributeError:\n pass\n if config['list'] == 'diary':\n entry['letterboxd_uscore'] = int(film.find_next(attrs={'data-rating': True}).get('data-rating'))\n elif config['list'] == 'rated':\n entry['letterboxd_uscore'] = int(film.find_next(itemprop='rating').get('content'))\n\n return entry\n\n @cached('letterboxd', persist='2 hours')\n def on_task_input(self, task, config=None):\n config = self.build_config(config)\n url = base_url + config['p_slug'] + config['sort_by']\n max_results = config.get('max_results', 1)\n rcount = 0\n next_page = ''\n\n log.verbose('Looking for films in Letterboxd list: %s' % url)\n\n entries = []\n while next_page is not None and rcount < max_results:\n try:\n page = requests.get(url).content\n except RequestException as e:\n raise plugin.PluginError('Error retrieving list from Letterboxd: %s' % e)\n soup = get_soup(page)\n\n for film in soup.find_all(attrs={config['f_slug']: True}):\n if rcount < max_results:\n entry = self.parse_film(film, config)\n entries.append(entry)\n if 'max_results' in config:\n rcount += 1\n\n next_page = soup.find(class_='paginate-next')\n if next_page is not None:\n next_page = next_page.get('href')\n if next_page is not None:\n url = base_url + next_page\n\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(Letterboxd, 'letterboxd', api_ver=2)\n", "path": "flexget/plugins/input/letterboxd.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.cached_input import cached\nfrom flexget.utils.requests import RequestException, Session, TimedLimiter\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('letterboxd')\n\nrequests = Session(max_retries=5)\nrequests.add_domain_limiter(TimedLimiter('letterboxd.com', '1 seconds'))\nbase_url = 'http://letterboxd.com'\n\nSLUGS = {\n 'default': {\n 'p_slug': '/%(user)s/list/%(list)s/',\n 'f_slug': 'data-film-slug'},\n 'diary': {\n 'p_slug': '/%(user)s/films/diary/',\n 'f_slug': 'data-film-slug'},\n 'likes': {\n 'p_slug': '/%(user)s/likes/films/',\n 'f_slug': 'data-film-link'},\n 'rated': {\n 'p_slug': '/%(user)s/films/ratings/',\n 'f_slug': 'data-film-slug'},\n 'watched': {\n 'p_slug': '/%(user)s/films/',\n 'f_slug': 'data-film-slug'},\n 'watchlist': {\n 'p_slug': '/%(user)s/watchlist/',\n 'f_slug': 'data-film-slug'}\n}\n\nSORT_BY = {\n 'default': '',\n 'added': 
'by/added/',\n 'length-ascending': 'by/shortest/',\n 'length-descending': 'by/longest/',\n 'name': 'by/name/',\n 'popularity': 'by/popular/',\n 'rating-ascending': 'by/rating-lowest/',\n 'rating-descending': 'by/rating/',\n 'release-ascending': 'by/release-earliest/',\n 'release-descending': 'by/release/'\n}\n\n\nclass Letterboxd(object):\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'list': {'type': 'string'},\n 'sort_by': {\n 'type': 'string',\n 'enum': list(SORT_BY.keys()),\n 'default': 'default'},\n 'max_results': {'type': 'integer'}\n },\n 'required': ['username', 'list'],\n 'additionalProperties': False\n }\n\n def build_config(self, config):\n config['list'] = config['list'].lower().replace(' ', '-')\n list_key = config['list']\n if list_key not in list(SLUGS.keys()):\n list_key = 'default'\n config['p_slug'] = SLUGS[list_key]['p_slug'] % {'user': config['username'], 'list': config['list']}\n config['f_slug'] = SLUGS[list_key]['f_slug']\n config['sort_by'] = SORT_BY[config['sort_by']]\n\n return config\n\n def tmdb_lookup(self, search):\n tmdb = plugin.get_plugin_by_name('api_tmdb').instance.lookup(tmdb_id=search)\n result = {\n 'title': '%s (%s)' % (tmdb.name, tmdb.year),\n 'imdb_id': tmdb.imdb_id,\n 'tmdb_id': tmdb.id,\n 'movie_name': tmdb.name,\n 'movie_year': tmdb.year\n }\n\n return result\n\n def parse_film(self, film, config):\n url = base_url + film.get(config['f_slug'])\n soup = get_soup(requests.get(url).content)\n result = self.tmdb_lookup(soup.find(attrs={'data-tmdb-id': True}).get('data-tmdb-id'))\n\n entry = Entry(result)\n entry['url'] = url\n entry['letterboxd_list'] = '%s (%s)' % (config['list'], config['username'])\n try:\n entry['letterboxd_score'] = float(soup.find(itemprop='average').get('content'))\n except AttributeError:\n pass\n if config['list'] == 'diary':\n entry['letterboxd_uscore'] = int(film.find_next(attrs={'data-rating': True}).get('data-rating'))\n elif config['list'] == 'rated':\n entry['letterboxd_uscore'] = int(film.find_next(itemprop='rating').get('content'))\n\n return entry\n\n @cached('letterboxd', persist='2 hours')\n def on_task_input(self, task, config=None):\n config = self.build_config(config)\n url = base_url + config['p_slug'] + config['sort_by']\n max_results = config.get('max_results', 1)\n rcount = 0\n next_page = ''\n\n log.verbose('Looking for films in Letterboxd list: %s' % url)\n\n entries = []\n while next_page is not None and rcount < max_results:\n try:\n page = requests.get(url).content\n except RequestException as e:\n raise plugin.PluginError('Error retrieving list from Letterboxd: %s' % e)\n soup = get_soup(page)\n\n for film in soup.find_all(attrs={config['f_slug']: True}):\n if rcount < max_results:\n entry = self.parse_film(film, config)\n entries.append(entry)\n if 'max_results' in config:\n rcount += 1\n\n next_page = soup.select_one('.paginate-nextprev .next')\n if next_page is not None:\n next_page = next_page.get('href')\n if next_page is not None:\n url = base_url + next_page\n\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(Letterboxd, 'letterboxd', api_ver=2)\n", "path": "flexget/plugins/input/letterboxd.py"}]} | 2,154 | 135 |
gh_patches_debug_15683 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ME failing since at least 2018-10-25
ME has been failing since 2018-10-25
Based on automated runs it appears that ME has not run successfully in 2 days (2018-10-25).
```
loaded Open States pupa settings...
me (scrape, import)
bills: {}
Traceback (most recent call last):
File "/opt/openstates/venv-pupa//bin/pupa", line 11, in <module>
load_entry_point('pupa', 'console_scripts', 'pupa')()
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py", line 68, in main
subcommands[args.subcommand].handle(args, other)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 260, in handle
return self.do_handle(args, other, juris)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 301, in do_handle
self.check_session_list(juris)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 228, in check_session_list
scraped_sessions = juris.get_session_list()
File "/opt/openstates/openstates/openstates/me/__init__.py", line 98, in get_session_list
sessions.remove('jb-Test')
ValueError: list.remove(x): x not in list
```
Visit http://bobsled.openstates.org for more info.
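For context, the `ValueError` above is simply how `list.remove()` behaves when the element is missing — presumably the advanced-search page no longer lists a 'jb-Test' session. Below is a minimal, standalone reproduction with a made-up session list, plus one defensive alternative; this is illustrative only and not necessarily the patch that was applied:

```python
# Hypothetical scrape result after the site dropped the 'jb-Test' option.
sessions = ['127th Legislature', '128th Legislature']

try:
    sessions.remove('jb-Test')       # hard-coded removal of a vanished entry
except ValueError as exc:
    print(exc)                       # list.remove(x): x not in list

# A more tolerant pattern: filter against an ignore set instead of calling
# remove() on entries that may or may not be present.
ignored = {'jb-Test', '2001-2002'}
cleaned = [s for s in sessions if s not in ignored]
print(cleaned)                       # ['127th Legislature', '128th Legislature']
```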
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/me/__init__.py`
Content:
```
1 from pupa.scrape import Jurisdiction, Organization
2 from openstates.utils import url_xpath
3
4 from .bills import MEBillScraper
5 # from .people import MEPersonScraper
6 # from .committees import MECommitteeScraper
7
8
9 class Maine(Jurisdiction):
10 division_id = "ocd-division/country:us/state:me"
11 classification = "government"
12 name = "Maine"
13 url = "http://legislature.maine.gov"
14 scrapers = {
15 'bills': MEBillScraper,
16 # 'people': MEPersonScraper,
17 # 'committees': MECommitteeScraper,
18 }
19 legislative_sessions = [
20 {
21 "_scraped_name": "121st Legislature",
22 "identifier": "121",
23 "name": "121st Legislature (2003-2004)"
24 },
25 {
26 "_scraped_name": "122nd Legislature",
27 "identifier": "122",
28 "name": "122nd Legislature (2005-2006)"
29 },
30 {
31 "_scraped_name": "123rd Legislature",
32 "identifier": "123",
33 "name": "123rd Legislature (2007-2008)"
34 },
35 {
36 "_scraped_name": "124th Legislature",
37 "identifier": "124",
38 "name": "124th Legislature (2009-2010)"
39 },
40 {
41 "_scraped_name": "125th Legislature",
42 "identifier": "125",
43 "name": "125th Legislature (2011-2012)"
44 },
45 {
46 "_scraped_name": "126th Legislature",
47 "identifier": "126",
48 "name": "126th Legislature (2013-2014)"
49 },
50 {
51 "_scraped_name": "127th Legislature",
52 "identifier": "127",
53 "name": "127th Legislature (2015-2016)"
54 },
55 {
56 "_scraped_name": "128th Legislature",
57 "identifier": "128",
58 "name": "128th Legislature (2017-2018)",
59 "start_date": "2016-12-07",
60 "end_date": "2017-06-14",
61 }
62 ]
63 ignored_scraped_sessions = []
64
65 def get_organizations(self):
66 legislature_name = "Maine Legislature"
67 lower_chamber_name = "House"
68 lower_seats = 151
69 lower_title = "Representative"
70 upper_chamber_name = "Senate"
71 upper_seats = 35
72 upper_title = "Senator"
73
74 legislature = Organization(name=legislature_name,
75 classification="legislature")
76 upper = Organization(upper_chamber_name, classification='upper',
77 parent_id=legislature._id)
78 lower = Organization(lower_chamber_name, classification='lower',
79 parent_id=legislature._id)
80
81 for n in range(1, upper_seats + 1):
82 upper.add_post(
83 label=str(n), role=upper_title,
84 division_id='{}/sldu:{}'.format(self.division_id, n))
85 for n in range(1, lower_seats + 1):
86 lower.add_post(
87 label=str(n), role=lower_title,
88 division_id='{}/sldl:{}'.format(self.division_id, n))
89
90 yield legislature
91 yield Organization(name='Office of the Governor', classification='executive')
92 yield upper
93 yield lower
94
95 def get_session_list(self):
96 sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',
97 '//select[@name="LegSession"]/option/text()')
98 sessions.remove('jb-Test')
99 sessions.remove('2001-2002')
100 return sessions
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/me/__init__.py b/openstates/me/__init__.py
--- a/openstates/me/__init__.py
+++ b/openstates/me/__init__.py
@@ -60,7 +60,9 @@
"end_date": "2017-06-14",
}
]
- ignored_scraped_sessions = []
+ ignored_scraped_sessions = [
+ '2001-2002'
+ ]
def get_organizations(self):
legislature_name = "Maine Legislature"
@@ -95,6 +97,4 @@
def get_session_list(self):
sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',
'//select[@name="LegSession"]/option/text()')
- sessions.remove('jb-Test')
- sessions.remove('2001-2002')
return sessions
| {"golden_diff": "diff --git a/openstates/me/__init__.py b/openstates/me/__init__.py\n--- a/openstates/me/__init__.py\n+++ b/openstates/me/__init__.py\n@@ -60,7 +60,9 @@\n \"end_date\": \"2017-06-14\",\n }\n ]\n- ignored_scraped_sessions = []\n+ ignored_scraped_sessions = [\n+ '2001-2002'\n+ ]\n \n def get_organizations(self):\n legislature_name = \"Maine Legislature\"\n@@ -95,6 +97,4 @@\n def get_session_list(self):\n sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',\n '//select[@name=\"LegSession\"]/option/text()')\n- sessions.remove('jb-Test')\n- sessions.remove('2001-2002')\n return sessions\n", "issue": "ME failing since at least 2018-10-25\nME has been failing since 2018-10-25\n\nBased on automated runs it appears that ME has not run successfully in 2 days (2018-10-25).\n\n\n```\n loaded Open States pupa settings...\nme (scrape, import)\n bills: {}\nTraceback (most recent call last):\n File \"/opt/openstates/venv-pupa//bin/pupa\", line 11, in <module>\n load_entry_point('pupa', 'console_scripts', 'pupa')()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py\", line 68, in main\n subcommands[args.subcommand].handle(args, other)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 260, in handle\n return self.do_handle(args, other, juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 301, in do_handle\n self.check_session_list(juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 228, in check_session_list\n scraped_sessions = juris.get_session_list()\n File \"/opt/openstates/openstates/openstates/me/__init__.py\", line 98, in get_session_list\n sessions.remove('jb-Test')\nValueError: list.remove(x): x not in list\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom openstates.utils import url_xpath\n\nfrom .bills import MEBillScraper\n# from .people import MEPersonScraper\n# from .committees import MECommitteeScraper\n\n\nclass Maine(Jurisdiction):\n division_id = \"ocd-division/country:us/state:me\"\n classification = \"government\"\n name = \"Maine\"\n url = \"http://legislature.maine.gov\"\n scrapers = {\n 'bills': MEBillScraper,\n # 'people': MEPersonScraper,\n # 'committees': MECommitteeScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"121st Legislature\",\n \"identifier\": \"121\",\n \"name\": \"121st Legislature (2003-2004)\"\n },\n {\n \"_scraped_name\": \"122nd Legislature\",\n \"identifier\": \"122\",\n \"name\": \"122nd Legislature (2005-2006)\"\n },\n {\n \"_scraped_name\": \"123rd Legislature\",\n \"identifier\": \"123\",\n \"name\": \"123rd Legislature (2007-2008)\"\n },\n {\n \"_scraped_name\": \"124th Legislature\",\n \"identifier\": \"124\",\n \"name\": \"124th Legislature (2009-2010)\"\n },\n {\n \"_scraped_name\": \"125th Legislature\",\n \"identifier\": \"125\",\n \"name\": \"125th Legislature (2011-2012)\"\n },\n {\n \"_scraped_name\": \"126th Legislature\",\n \"identifier\": \"126\",\n \"name\": \"126th Legislature (2013-2014)\"\n },\n {\n \"_scraped_name\": \"127th Legislature\",\n \"identifier\": \"127\",\n \"name\": \"127th Legislature (2015-2016)\"\n },\n {\n \"_scraped_name\": \"128th Legislature\",\n \"identifier\": \"128\",\n \"name\": \"128th Legislature (2017-2018)\",\n \"start_date\": \"2016-12-07\",\n \"end_date\": \"2017-06-14\",\n }\n ]\n ignored_scraped_sessions = []\n\n def 
get_organizations(self):\n legislature_name = \"Maine Legislature\"\n lower_chamber_name = \"House\"\n lower_seats = 151\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 35\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield Organization(name='Office of the Governor', classification='executive')\n yield upper\n yield lower\n\n def get_session_list(self):\n sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',\n '//select[@name=\"LegSession\"]/option/text()')\n sessions.remove('jb-Test')\n sessions.remove('2001-2002')\n return sessions\n", "path": "openstates/me/__init__.py"}], "after_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom openstates.utils import url_xpath\n\nfrom .bills import MEBillScraper\n# from .people import MEPersonScraper\n# from .committees import MECommitteeScraper\n\n\nclass Maine(Jurisdiction):\n division_id = \"ocd-division/country:us/state:me\"\n classification = \"government\"\n name = \"Maine\"\n url = \"http://legislature.maine.gov\"\n scrapers = {\n 'bills': MEBillScraper,\n # 'people': MEPersonScraper,\n # 'committees': MECommitteeScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"121st Legislature\",\n \"identifier\": \"121\",\n \"name\": \"121st Legislature (2003-2004)\"\n },\n {\n \"_scraped_name\": \"122nd Legislature\",\n \"identifier\": \"122\",\n \"name\": \"122nd Legislature (2005-2006)\"\n },\n {\n \"_scraped_name\": \"123rd Legislature\",\n \"identifier\": \"123\",\n \"name\": \"123rd Legislature (2007-2008)\"\n },\n {\n \"_scraped_name\": \"124th Legislature\",\n \"identifier\": \"124\",\n \"name\": \"124th Legislature (2009-2010)\"\n },\n {\n \"_scraped_name\": \"125th Legislature\",\n \"identifier\": \"125\",\n \"name\": \"125th Legislature (2011-2012)\"\n },\n {\n \"_scraped_name\": \"126th Legislature\",\n \"identifier\": \"126\",\n \"name\": \"126th Legislature (2013-2014)\"\n },\n {\n \"_scraped_name\": \"127th Legislature\",\n \"identifier\": \"127\",\n \"name\": \"127th Legislature (2015-2016)\"\n },\n {\n \"_scraped_name\": \"128th Legislature\",\n \"identifier\": \"128\",\n \"name\": \"128th Legislature (2017-2018)\",\n \"start_date\": \"2016-12-07\",\n \"end_date\": \"2017-06-14\",\n }\n ]\n ignored_scraped_sessions = [\n '2001-2002'\n ]\n\n def get_organizations(self):\n legislature_name = \"Maine Legislature\"\n lower_chamber_name = \"House\"\n lower_seats = 151\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 35\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n 
division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield Organization(name='Office of the Governor', classification='executive')\n yield upper\n yield lower\n\n def get_session_list(self):\n sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',\n '//select[@name=\"LegSession\"]/option/text()')\n return sessions\n", "path": "openstates/me/__init__.py"}]} | 1,725 | 208 |
gh_patches_debug_4489 | rasdani/github-patches | git_diff | apache__airflow-25305 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ParamsDict represents the class object itself, not keys and values on Task Instance Details
### Apache Airflow version
2.3.3 (latest released)
### What happened
ParamsDict's printable representation shows the class object itself, like `<airflow.models.param.ParamsDict object at 0x7fd0eba9bb80>`, on the Task Instance Details page because the class does not define a `__repr__` method.
<img width="791" alt="image" src="https://user-images.githubusercontent.com/16971553/180902761-88b9dd9f-7102-4e49-b8b8-0282b31dda56.png">
It used to be a plain `dict` object, and the keys and values contained in Params were shown on the UI before Params was replaced with the advanced Params in #17100.
### What you think should happen instead
It was originally displayed as shown below, back when it was a `dict` object.

I think it can be fixed by adding a `__repr__` method to the class, like below.
```python
class ParamsDict(dict):
...
def __repr__(self):
return f"{self.dump()}"
```
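With a `__repr__` along those lines, the details page would render the resolved keys and values again. A rough, Airflow-independent sketch of the intended behaviour follows (the class name and param values here are made up for illustration):

```python
class ParamsDictSketch(dict):
    """Stand-in for ParamsDict, only to illustrate the desired repr."""

    def dump(self):
        # ParamsDict.dump() resolves each Param to its plain value.
        return dict(self)

    def __repr__(self):
        return repr(self.dump())


print(ParamsDictSketch(my_param='my_value', retries=3))
# {'my_param': 'my_value', 'retries': 3}
```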
### How to reproduce
I guess this happens on any Airflow version from 2.2.0 onward.
### Operating System
Linux, but it does not depend on the OS
### Versions of Apache Airflow Providers
_No response_
### Deployment
Other
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/models/param.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 import contextlib
18 import copy
19 import json
20 import warnings
21 from typing import TYPE_CHECKING, Any, Dict, ItemsView, MutableMapping, Optional, ValuesView
22
23 from airflow.exceptions import AirflowException, ParamValidationError
24 from airflow.utils.context import Context
25 from airflow.utils.types import NOTSET, ArgNotSet
26
27 if TYPE_CHECKING:
28 from airflow.models.dag import DAG
29
30
31 class Param:
32 """
33 Class to hold the default value of a Param and rule set to do the validations. Without the rule set
34 it always validates and returns the default value.
35
36 :param default: The value this Param object holds
37 :param description: Optional help text for the Param
38 :param schema: The validation schema of the Param, if not given then all kwargs except
39 default & description will form the schema
40 """
41
42 CLASS_IDENTIFIER = '__class'
43
44 def __init__(self, default: Any = NOTSET, description: Optional[str] = None, **kwargs):
45 if default is not NOTSET:
46 self._warn_if_not_json(default)
47 self.value = default
48 self.description = description
49 self.schema = kwargs.pop('schema') if 'schema' in kwargs else kwargs
50
51 def __copy__(self) -> "Param":
52 return Param(self.value, self.description, schema=self.schema)
53
54 @staticmethod
55 def _warn_if_not_json(value):
56 try:
57 json.dumps(value)
58 except Exception:
59 warnings.warn(
60 "The use of non-json-serializable params is deprecated and will be removed in "
61 "a future release",
62 DeprecationWarning,
63 )
64
65 def resolve(self, value: Any = NOTSET, suppress_exception: bool = False) -> Any:
66 """
67 Runs the validations and returns the Param's final value.
68 May raise ValueError on failed validations, or TypeError
69 if no value is passed and no value already exists.
70 We first check that value is json-serializable; if not, warn.
71 In future release we will require the value to be json-serializable.
72
73 :param value: The value to be updated for the Param
74 :param suppress_exception: To raise an exception or not when the validations fails.
75 If true and validations fails, the return value would be None.
76 """
77 import jsonschema
78 from jsonschema import FormatChecker
79 from jsonschema.exceptions import ValidationError
80
81 if value is not NOTSET:
82 self._warn_if_not_json(value)
83 final_val = value if value is not NOTSET else self.value
84 if isinstance(final_val, ArgNotSet):
85 if suppress_exception:
86 return None
87 raise ParamValidationError("No value passed and Param has no default value")
88 try:
89 jsonschema.validate(final_val, self.schema, format_checker=FormatChecker())
90 except ValidationError as err:
91 if suppress_exception:
92 return None
93 raise ParamValidationError(err) from None
94 self.value = final_val
95 return final_val
96
97 def dump(self) -> dict:
98 """Dump the Param as a dictionary"""
99 out_dict = {self.CLASS_IDENTIFIER: f'{self.__module__}.{self.__class__.__name__}'}
100 out_dict.update(self.__dict__)
101 return out_dict
102
103 @property
104 def has_value(self) -> bool:
105 return self.value is not NOTSET
106
107
108 class ParamsDict(MutableMapping[str, Any]):
109 """
110 Class to hold all params for dags or tasks. All the keys are strictly string and values
111 are converted into Param's object if they are not already. This class is to replace param's
112 dictionary implicitly and ideally not needed to be used directly.
113 """
114
115 __slots__ = ['__dict', 'suppress_exception']
116
117 def __init__(self, dict_obj: Optional[Dict] = None, suppress_exception: bool = False):
118 """
119 :param dict_obj: A dict or dict like object to init ParamsDict
120 :param suppress_exception: Flag to suppress value exceptions while initializing the ParamsDict
121 """
122 params_dict: Dict[str, Param] = {}
123 dict_obj = dict_obj or {}
124 for k, v in dict_obj.items():
125 if not isinstance(v, Param):
126 params_dict[k] = Param(v)
127 else:
128 params_dict[k] = v
129 self.__dict = params_dict
130 self.suppress_exception = suppress_exception
131
132 def __copy__(self) -> "ParamsDict":
133 return ParamsDict(self.__dict, self.suppress_exception)
134
135 def __deepcopy__(self, memo: Optional[Dict[int, Any]]) -> "ParamsDict":
136 return ParamsDict(copy.deepcopy(self.__dict, memo), self.suppress_exception)
137
138 def __contains__(self, o: object) -> bool:
139 return o in self.__dict
140
141 def __len__(self) -> int:
142 return len(self.__dict)
143
144 def __delitem__(self, v: str) -> None:
145 del self.__dict[v]
146
147 def __iter__(self):
148 return iter(self.__dict)
149
150 def __setitem__(self, key: str, value: Any) -> None:
151 """
152 Override for dictionary's ``setitem`` method. This method make sure that all values are of
153 Param's type only.
154
155 :param key: A key which needs to be inserted or updated in the dict
156 :param value: A value which needs to be set against the key. It could be of any
157 type but will be converted and stored as a Param object eventually.
158 """
159 if isinstance(value, Param):
160 param = value
161 elif key in self.__dict:
162 param = self.__dict[key]
163 try:
164 param.resolve(value=value, suppress_exception=self.suppress_exception)
165 except ParamValidationError as ve:
166 raise ParamValidationError(f'Invalid input for param {key}: {ve}') from None
167 else:
168 # if the key isn't there already and if the value isn't of Param type create a new Param object
169 param = Param(value)
170
171 self.__dict[key] = param
172
173 def __getitem__(self, key: str) -> Any:
174 """
175 Override for dictionary's ``getitem`` method. After fetching the key, it would call the
176 resolve method as well on the Param object.
177
178 :param key: The key to fetch
179 """
180 param = self.__dict[key]
181 return param.resolve(suppress_exception=self.suppress_exception)
182
183 def get_param(self, key: str) -> Param:
184 """Get the internal :class:`.Param` object for this key"""
185 return self.__dict[key]
186
187 def items(self):
188 return ItemsView(self.__dict)
189
190 def values(self):
191 return ValuesView(self.__dict)
192
193 def update(self, *args, **kwargs) -> None:
194 if len(args) == 1 and not kwargs and isinstance(args[0], ParamsDict):
195 return super().update(args[0].__dict)
196 super().update(*args, **kwargs)
197
198 def dump(self) -> Dict[str, Any]:
199 """Dumps the ParamsDict object as a dictionary, while suppressing exceptions"""
200 return {k: v.resolve(suppress_exception=True) for k, v in self.items()}
201
202 def validate(self) -> Dict[str, Any]:
203 """Validates & returns all the Params object stored in the dictionary"""
204 resolved_dict = {}
205 try:
206 for k, v in self.items():
207 resolved_dict[k] = v.resolve(suppress_exception=self.suppress_exception)
208 except ParamValidationError as ve:
209 raise ParamValidationError(f'Invalid input for param {k}: {ve}') from None
210
211 return resolved_dict
212
213
214 class DagParam:
215 """
216 Class that represents a DAG run parameter & binds a simple Param object to a name within a DAG instance,
217 so that it can be resolved during the run time via ``{{ context }}`` dictionary. The ideal use case of
218 this class is to implicitly convert args passed to a method which is being decorated by ``@dag`` keyword.
219
220 It can be used to parameterize your dags. You can overwrite its value by setting it on conf
221 when you trigger your DagRun.
222
223 This can also be used in templates by accessing ``{{context.params}}`` dictionary.
224
225 **Example**:
226
227 with DAG(...) as dag:
228 EmailOperator(subject=dag.param('subject', 'Hi from Airflow!'))
229
230 :param current_dag: Dag being used for parameter.
231 :param name: key value which is used to set the parameter
232 :param default: Default value used if no parameter was set.
233 """
234
235 def __init__(self, current_dag: "DAG", name: str, default: Any = NOTSET):
236 if default is not NOTSET:
237 current_dag.params[name] = default
238 self._name = name
239 self._default = default
240
241 def resolve(self, context: Context) -> Any:
242 """Pull DagParam value from DagRun context. This method is run during ``op.execute()``."""
243 with contextlib.suppress(KeyError):
244 return context['dag_run'].conf[self._name]
245 if self._default is not NOTSET:
246 return self._default
247 with contextlib.suppress(KeyError):
248 return context['params'][self._name]
249 raise AirflowException(f'No value could be resolved for parameter {self._name}')
250
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/models/param.py b/airflow/models/param.py
--- a/airflow/models/param.py
+++ b/airflow/models/param.py
@@ -147,6 +147,9 @@
def __iter__(self):
return iter(self.__dict)
+ def __repr__(self):
+ return repr(self.dump())
+
def __setitem__(self, key: str, value: Any) -> None:
"""
Override for dictionary's ``setitem`` method. This method make sure that all values are of
| {"golden_diff": "diff --git a/airflow/models/param.py b/airflow/models/param.py\n--- a/airflow/models/param.py\n+++ b/airflow/models/param.py\n@@ -147,6 +147,9 @@\n def __iter__(self):\n return iter(self.__dict)\n \n+ def __repr__(self):\n+ return repr(self.dump())\n+\n def __setitem__(self, key: str, value: Any) -> None:\n \"\"\"\n Override for dictionary's ``setitem`` method. This method make sure that all values are of\n", "issue": "ParamsDict represents the class object itself, not keys and values on Task Instance Details\n### Apache Airflow version\r\n\r\n2.3.3 (latest released)\r\n\r\n### What happened\r\n\r\nParamsDict's printable presentation shows the class object itself like `<airflow.models.param.ParamsDict object at 0x7fd0eba9bb80>` on the page of Task Instance Detail because it does not have `__repr__` method in its class. \r\n\r\n<img width=\"791\" alt=\"image\" src=\"https://user-images.githubusercontent.com/16971553/180902761-88b9dd9f-7102-4e49-b8b8-0282b31dda56.png\">\r\n\r\nIt used to be `dict` object and what keys and values Params include are shown on UI before replacing Params with the advanced Params by #17100. \r\n\r\n\r\n\r\n### What you think should happen instead\r\n\r\nIt was originally shown below when it was `dict` object. \r\n\r\n\r\n\r\n\r\nI think it can be fixed by adding `__repr__` method to the class like below.\r\n\r\n```python\r\nclass ParamsDict(dict):\r\n ...\r\n def __repr__(self):\r\n return f\"{self.dump()}\"\r\n```\r\n\r\n### How to reproduce\r\n\r\nI guess it all happens on Airflow using 2.2.0+\r\n\r\n### Operating System\r\n\r\nLinux, but it's not depending on OS\r\n\r\n### Versions of Apache Airflow Providers\r\n\r\n_No response_\r\n\r\n### Deployment\r\n\r\nOther\r\n\r\n### Deployment details\r\n\r\n_No response_\r\n\r\n### Anything else\r\n\r\n_No response_\r\n\r\n### Are you willing to submit PR?\r\n\r\n- [X] Yes I am willing to submit a PR!\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport contextlib\nimport copy\nimport json\nimport warnings\nfrom typing import TYPE_CHECKING, Any, Dict, ItemsView, MutableMapping, Optional, ValuesView\n\nfrom airflow.exceptions import AirflowException, ParamValidationError\nfrom airflow.utils.context import Context\nfrom airflow.utils.types import NOTSET, ArgNotSet\n\nif TYPE_CHECKING:\n from airflow.models.dag import DAG\n\n\nclass Param:\n \"\"\"\n Class to hold the default value of a Param and rule set to do the validations. 
Without the rule set\n it always validates and returns the default value.\n\n :param default: The value this Param object holds\n :param description: Optional help text for the Param\n :param schema: The validation schema of the Param, if not given then all kwargs except\n default & description will form the schema\n \"\"\"\n\n CLASS_IDENTIFIER = '__class'\n\n def __init__(self, default: Any = NOTSET, description: Optional[str] = None, **kwargs):\n if default is not NOTSET:\n self._warn_if_not_json(default)\n self.value = default\n self.description = description\n self.schema = kwargs.pop('schema') if 'schema' in kwargs else kwargs\n\n def __copy__(self) -> \"Param\":\n return Param(self.value, self.description, schema=self.schema)\n\n @staticmethod\n def _warn_if_not_json(value):\n try:\n json.dumps(value)\n except Exception:\n warnings.warn(\n \"The use of non-json-serializable params is deprecated and will be removed in \"\n \"a future release\",\n DeprecationWarning,\n )\n\n def resolve(self, value: Any = NOTSET, suppress_exception: bool = False) -> Any:\n \"\"\"\n Runs the validations and returns the Param's final value.\n May raise ValueError on failed validations, or TypeError\n if no value is passed and no value already exists.\n We first check that value is json-serializable; if not, warn.\n In future release we will require the value to be json-serializable.\n\n :param value: The value to be updated for the Param\n :param suppress_exception: To raise an exception or not when the validations fails.\n If true and validations fails, the return value would be None.\n \"\"\"\n import jsonschema\n from jsonschema import FormatChecker\n from jsonschema.exceptions import ValidationError\n\n if value is not NOTSET:\n self._warn_if_not_json(value)\n final_val = value if value is not NOTSET else self.value\n if isinstance(final_val, ArgNotSet):\n if suppress_exception:\n return None\n raise ParamValidationError(\"No value passed and Param has no default value\")\n try:\n jsonschema.validate(final_val, self.schema, format_checker=FormatChecker())\n except ValidationError as err:\n if suppress_exception:\n return None\n raise ParamValidationError(err) from None\n self.value = final_val\n return final_val\n\n def dump(self) -> dict:\n \"\"\"Dump the Param as a dictionary\"\"\"\n out_dict = {self.CLASS_IDENTIFIER: f'{self.__module__}.{self.__class__.__name__}'}\n out_dict.update(self.__dict__)\n return out_dict\n\n @property\n def has_value(self) -> bool:\n return self.value is not NOTSET\n\n\nclass ParamsDict(MutableMapping[str, Any]):\n \"\"\"\n Class to hold all params for dags or tasks. All the keys are strictly string and values\n are converted into Param's object if they are not already. 
This class is to replace param's\n dictionary implicitly and ideally not needed to be used directly.\n \"\"\"\n\n __slots__ = ['__dict', 'suppress_exception']\n\n def __init__(self, dict_obj: Optional[Dict] = None, suppress_exception: bool = False):\n \"\"\"\n :param dict_obj: A dict or dict like object to init ParamsDict\n :param suppress_exception: Flag to suppress value exceptions while initializing the ParamsDict\n \"\"\"\n params_dict: Dict[str, Param] = {}\n dict_obj = dict_obj or {}\n for k, v in dict_obj.items():\n if not isinstance(v, Param):\n params_dict[k] = Param(v)\n else:\n params_dict[k] = v\n self.__dict = params_dict\n self.suppress_exception = suppress_exception\n\n def __copy__(self) -> \"ParamsDict\":\n return ParamsDict(self.__dict, self.suppress_exception)\n\n def __deepcopy__(self, memo: Optional[Dict[int, Any]]) -> \"ParamsDict\":\n return ParamsDict(copy.deepcopy(self.__dict, memo), self.suppress_exception)\n\n def __contains__(self, o: object) -> bool:\n return o in self.__dict\n\n def __len__(self) -> int:\n return len(self.__dict)\n\n def __delitem__(self, v: str) -> None:\n del self.__dict[v]\n\n def __iter__(self):\n return iter(self.__dict)\n\n def __setitem__(self, key: str, value: Any) -> None:\n \"\"\"\n Override for dictionary's ``setitem`` method. This method make sure that all values are of\n Param's type only.\n\n :param key: A key which needs to be inserted or updated in the dict\n :param value: A value which needs to be set against the key. It could be of any\n type but will be converted and stored as a Param object eventually.\n \"\"\"\n if isinstance(value, Param):\n param = value\n elif key in self.__dict:\n param = self.__dict[key]\n try:\n param.resolve(value=value, suppress_exception=self.suppress_exception)\n except ParamValidationError as ve:\n raise ParamValidationError(f'Invalid input for param {key}: {ve}') from None\n else:\n # if the key isn't there already and if the value isn't of Param type create a new Param object\n param = Param(value)\n\n self.__dict[key] = param\n\n def __getitem__(self, key: str) -> Any:\n \"\"\"\n Override for dictionary's ``getitem`` method. 
After fetching the key, it would call the\n resolve method as well on the Param object.\n\n :param key: The key to fetch\n \"\"\"\n param = self.__dict[key]\n return param.resolve(suppress_exception=self.suppress_exception)\n\n def get_param(self, key: str) -> Param:\n \"\"\"Get the internal :class:`.Param` object for this key\"\"\"\n return self.__dict[key]\n\n def items(self):\n return ItemsView(self.__dict)\n\n def values(self):\n return ValuesView(self.__dict)\n\n def update(self, *args, **kwargs) -> None:\n if len(args) == 1 and not kwargs and isinstance(args[0], ParamsDict):\n return super().update(args[0].__dict)\n super().update(*args, **kwargs)\n\n def dump(self) -> Dict[str, Any]:\n \"\"\"Dumps the ParamsDict object as a dictionary, while suppressing exceptions\"\"\"\n return {k: v.resolve(suppress_exception=True) for k, v in self.items()}\n\n def validate(self) -> Dict[str, Any]:\n \"\"\"Validates & returns all the Params object stored in the dictionary\"\"\"\n resolved_dict = {}\n try:\n for k, v in self.items():\n resolved_dict[k] = v.resolve(suppress_exception=self.suppress_exception)\n except ParamValidationError as ve:\n raise ParamValidationError(f'Invalid input for param {k}: {ve}') from None\n\n return resolved_dict\n\n\nclass DagParam:\n \"\"\"\n Class that represents a DAG run parameter & binds a simple Param object to a name within a DAG instance,\n so that it can be resolved during the run time via ``{{ context }}`` dictionary. The ideal use case of\n this class is to implicitly convert args passed to a method which is being decorated by ``@dag`` keyword.\n\n It can be used to parameterize your dags. You can overwrite its value by setting it on conf\n when you trigger your DagRun.\n\n This can also be used in templates by accessing ``{{context.params}}`` dictionary.\n\n **Example**:\n\n with DAG(...) as dag:\n EmailOperator(subject=dag.param('subject', 'Hi from Airflow!'))\n\n :param current_dag: Dag being used for parameter.\n :param name: key value which is used to set the parameter\n :param default: Default value used if no parameter was set.\n \"\"\"\n\n def __init__(self, current_dag: \"DAG\", name: str, default: Any = NOTSET):\n if default is not NOTSET:\n current_dag.params[name] = default\n self._name = name\n self._default = default\n\n def resolve(self, context: Context) -> Any:\n \"\"\"Pull DagParam value from DagRun context. This method is run during ``op.execute()``.\"\"\"\n with contextlib.suppress(KeyError):\n return context['dag_run'].conf[self._name]\n if self._default is not NOTSET:\n return self._default\n with contextlib.suppress(KeyError):\n return context['params'][self._name]\n raise AirflowException(f'No value could be resolved for parameter {self._name}')\n", "path": "airflow/models/param.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport contextlib\nimport copy\nimport json\nimport warnings\nfrom typing import TYPE_CHECKING, Any, Dict, ItemsView, MutableMapping, Optional, ValuesView\n\nfrom airflow.exceptions import AirflowException, ParamValidationError\nfrom airflow.utils.context import Context\nfrom airflow.utils.types import NOTSET, ArgNotSet\n\nif TYPE_CHECKING:\n from airflow.models.dag import DAG\n\n\nclass Param:\n \"\"\"\n Class to hold the default value of a Param and rule set to do the validations. Without the rule set\n it always validates and returns the default value.\n\n :param default: The value this Param object holds\n :param description: Optional help text for the Param\n :param schema: The validation schema of the Param, if not given then all kwargs except\n default & description will form the schema\n \"\"\"\n\n CLASS_IDENTIFIER = '__class'\n\n def __init__(self, default: Any = NOTSET, description: Optional[str] = None, **kwargs):\n if default is not NOTSET:\n self._warn_if_not_json(default)\n self.value = default\n self.description = description\n self.schema = kwargs.pop('schema') if 'schema' in kwargs else kwargs\n\n def __copy__(self) -> \"Param\":\n return Param(self.value, self.description, schema=self.schema)\n\n @staticmethod\n def _warn_if_not_json(value):\n try:\n json.dumps(value)\n except Exception:\n warnings.warn(\n \"The use of non-json-serializable params is deprecated and will be removed in \"\n \"a future release\",\n DeprecationWarning,\n )\n\n def resolve(self, value: Any = NOTSET, suppress_exception: bool = False) -> Any:\n \"\"\"\n Runs the validations and returns the Param's final value.\n May raise ValueError on failed validations, or TypeError\n if no value is passed and no value already exists.\n We first check that value is json-serializable; if not, warn.\n In future release we will require the value to be json-serializable.\n\n :param value: The value to be updated for the Param\n :param suppress_exception: To raise an exception or not when the validations fails.\n If true and validations fails, the return value would be None.\n \"\"\"\n import jsonschema\n from jsonschema import FormatChecker\n from jsonschema.exceptions import ValidationError\n\n if value is not NOTSET:\n self._warn_if_not_json(value)\n final_val = value if value is not NOTSET else self.value\n if isinstance(final_val, ArgNotSet):\n if suppress_exception:\n return None\n raise ParamValidationError(\"No value passed and Param has no default value\")\n try:\n jsonschema.validate(final_val, self.schema, format_checker=FormatChecker())\n except ValidationError as err:\n if suppress_exception:\n return None\n raise ParamValidationError(err) from None\n self.value = final_val\n return final_val\n\n def dump(self) -> dict:\n \"\"\"Dump the Param as a dictionary\"\"\"\n out_dict = {self.CLASS_IDENTIFIER: f'{self.__module__}.{self.__class__.__name__}'}\n out_dict.update(self.__dict__)\n return out_dict\n\n @property\n def has_value(self) -> bool:\n return self.value is not NOTSET\n\n\nclass ParamsDict(MutableMapping[str, Any]):\n \"\"\"\n Class to hold all params for dags or tasks. All the keys are strictly string and values\n are converted into Param's object if they are not already. 
This class is to replace param's\n dictionary implicitly and ideally not needed to be used directly.\n \"\"\"\n\n __slots__ = ['__dict', 'suppress_exception']\n\n def __init__(self, dict_obj: Optional[Dict] = None, suppress_exception: bool = False):\n \"\"\"\n :param dict_obj: A dict or dict like object to init ParamsDict\n :param suppress_exception: Flag to suppress value exceptions while initializing the ParamsDict\n \"\"\"\n params_dict: Dict[str, Param] = {}\n dict_obj = dict_obj or {}\n for k, v in dict_obj.items():\n if not isinstance(v, Param):\n params_dict[k] = Param(v)\n else:\n params_dict[k] = v\n self.__dict = params_dict\n self.suppress_exception = suppress_exception\n\n def __copy__(self) -> \"ParamsDict\":\n return ParamsDict(self.__dict, self.suppress_exception)\n\n def __deepcopy__(self, memo: Optional[Dict[int, Any]]) -> \"ParamsDict\":\n return ParamsDict(copy.deepcopy(self.__dict, memo), self.suppress_exception)\n\n def __contains__(self, o: object) -> bool:\n return o in self.__dict\n\n def __len__(self) -> int:\n return len(self.__dict)\n\n def __delitem__(self, v: str) -> None:\n del self.__dict[v]\n\n def __iter__(self):\n return iter(self.__dict)\n\n def __repr__(self):\n return repr(self.dump())\n\n def __setitem__(self, key: str, value: Any) -> None:\n \"\"\"\n Override for dictionary's ``setitem`` method. This method make sure that all values are of\n Param's type only.\n\n :param key: A key which needs to be inserted or updated in the dict\n :param value: A value which needs to be set against the key. It could be of any\n type but will be converted and stored as a Param object eventually.\n \"\"\"\n if isinstance(value, Param):\n param = value\n elif key in self.__dict:\n param = self.__dict[key]\n try:\n param.resolve(value=value, suppress_exception=self.suppress_exception)\n except ParamValidationError as ve:\n raise ParamValidationError(f'Invalid input for param {key}: {ve}') from None\n else:\n # if the key isn't there already and if the value isn't of Param type create a new Param object\n param = Param(value)\n\n self.__dict[key] = param\n\n def __getitem__(self, key: str) -> Any:\n \"\"\"\n Override for dictionary's ``getitem`` method. 
After fetching the key, it would call the\n resolve method as well on the Param object.\n\n :param key: The key to fetch\n \"\"\"\n param = self.__dict[key]\n return param.resolve(suppress_exception=self.suppress_exception)\n\n def get_param(self, key: str) -> Param:\n \"\"\"Get the internal :class:`.Param` object for this key\"\"\"\n return self.__dict[key]\n\n def items(self):\n return ItemsView(self.__dict)\n\n def values(self):\n return ValuesView(self.__dict)\n\n def update(self, *args, **kwargs) -> None:\n if len(args) == 1 and not kwargs and isinstance(args[0], ParamsDict):\n return super().update(args[0].__dict)\n super().update(*args, **kwargs)\n\n def dump(self) -> Dict[str, Any]:\n \"\"\"Dumps the ParamsDict object as a dictionary, while suppressing exceptions\"\"\"\n return {k: v.resolve(suppress_exception=True) for k, v in self.items()}\n\n def validate(self) -> Dict[str, Any]:\n \"\"\"Validates & returns all the Params object stored in the dictionary\"\"\"\n resolved_dict = {}\n try:\n for k, v in self.items():\n resolved_dict[k] = v.resolve(suppress_exception=self.suppress_exception)\n except ParamValidationError as ve:\n raise ParamValidationError(f'Invalid input for param {k}: {ve}') from None\n\n return resolved_dict\n\n\nclass DagParam:\n \"\"\"\n Class that represents a DAG run parameter & binds a simple Param object to a name within a DAG instance,\n so that it can be resolved during the run time via ``{{ context }}`` dictionary. The ideal use case of\n this class is to implicitly convert args passed to a method which is being decorated by ``@dag`` keyword.\n\n It can be used to parameterize your dags. You can overwrite its value by setting it on conf\n when you trigger your DagRun.\n\n This can also be used in templates by accessing ``{{context.params}}`` dictionary.\n\n **Example**:\n\n with DAG(...) as dag:\n EmailOperator(subject=dag.param('subject', 'Hi from Airflow!'))\n\n :param current_dag: Dag being used for parameter.\n :param name: key value which is used to set the parameter\n :param default: Default value used if no parameter was set.\n \"\"\"\n\n def __init__(self, current_dag: \"DAG\", name: str, default: Any = NOTSET):\n if default is not NOTSET:\n current_dag.params[name] = default\n self._name = name\n self._default = default\n\n def resolve(self, context: Context) -> Any:\n \"\"\"Pull DagParam value from DagRun context. This method is run during ``op.execute()``.\"\"\"\n with contextlib.suppress(KeyError):\n return context['dag_run'].conf[self._name]\n if self._default is not NOTSET:\n return self._default\n with contextlib.suppress(KeyError):\n return context['params'][self._name]\n raise AirflowException(f'No value could be resolved for parameter {self._name}')\n", "path": "airflow/models/param.py"}]} | 3,550 | 127 |
gh_patches_debug_3041 | rasdani/github-patches | git_diff | ethereum__web3.py-1107 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Backport 1094 to v4 branch
### What was wrong?
https://github.com/ethereum/web3.py/issues/1094#issuecomment-428259232 needs to be backported to the v4 branch.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from setuptools import (
4 find_packages,
5 setup,
6 )
7
8 extras_require = {
9 'tester': [
10 "eth-tester[py-evm]==0.1.0-beta.33",
11 "py-geth>=2.0.1,<3.0.0",
12 ],
13 'testrpc': ["eth-testrpc>=1.3.3,<2.0.0"],
14 'linter': [
15 "flake8==3.4.1",
16 "isort>=4.2.15,<5",
17 ],
18 'docs': [
19 "mock",
20 "sphinx-better-theme>=0.1.4",
21 "click>=5.1",
22 "configparser==3.5.0",
23 "contextlib2>=0.5.4",
24 #"eth-testrpc>=0.8.0",
25 #"ethereum-tester-client>=1.1.0",
26 "ethtoken",
27 "py-geth>=1.4.0",
28 "py-solc>=0.4.0",
29 "pytest>=2.7.2",
30 "sphinx",
31 "sphinx_rtd_theme>=0.1.9",
32 "toposort>=1.4",
33 "urllib3",
34 "web3>=2.1.0",
35 "wheel"
36 ],
37 'dev': [
38 "bumpversion",
39 "flaky>=3.3.0",
40 "hypothesis>=3.31.2",
41 "pytest>=3.5.0,<4",
42 "pytest-mock==1.*",
43 "pytest-pythonpath>=0.3",
44 "pytest-watch==4.*",
45 "pytest-xdist==1.*",
46 "tox>=1.8.0",
47 "tqdm",
48 "when-changed"
49 ]
50 }
51
52 extras_require['dev'] = (
53 extras_require['tester'] +
54 extras_require['linter'] +
55 extras_require['docs'] +
56 extras_require['dev']
57 )
58
59 setup(
60 name='web3',
61 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
62 version='4.7.2',
63 description="""Web3.py""",
64 long_description_markdown_filename='README.md',
65 author='Piper Merriam',
66 author_email='[email protected]',
67 url='https://github.com/ethereum/web3.py',
68 include_package_data=True,
69 install_requires=[
70 "toolz>=0.9.0,<1.0.0;implementation_name=='pypy'",
71 "cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'",
72 "eth-abi>=1.2.0,<2.0.0",
73 "eth-account>=0.2.1,<0.4.0",
74 "eth-utils>=1.2.0,<2.0.0",
75 "hexbytes>=0.1.0,<1.0.0",
76 "lru-dict>=1.1.6,<2.0.0",
77 "eth-hash[pycryptodome]>=0.2.0,<1.0.0",
78 "requests>=2.16.0,<3.0.0",
79 "websockets>=6.0.0,<7.0.0",
80 "pypiwin32>=223;platform_system=='Windows'",
81 ],
82 setup_requires=['setuptools-markdown'],
83 python_requires='>=3.5, <4',
84 extras_require=extras_require,
85 py_modules=['web3', 'ens'],
86 license="MIT",
87 zip_safe=False,
88 keywords='ethereum',
89 packages=find_packages(exclude=["tests", "tests.*"]),
90 classifiers=[
91 'Development Status :: 5 - Production/Stable',
92 'Intended Audience :: Developers',
93 'License :: OSI Approved :: MIT License',
94 'Natural Language :: English',
95 'Programming Language :: Python :: 3',
96 'Programming Language :: Python :: 3.5',
97 'Programming Language :: Python :: 3.6',
98 ],
99 )
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -80,7 +80,7 @@
"pypiwin32>=223;platform_system=='Windows'",
],
setup_requires=['setuptools-markdown'],
- python_requires='>=3.5, <4',
+ python_requires='>=3.5.3,<4',
extras_require=extras_require,
py_modules=['web3', 'ens'],
license="MIT",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -80,7 +80,7 @@\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n- python_requires='>=3.5, <4',\n+ python_requires='>=3.5.3,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n", "issue": "Backport 1094 to v4 branch\n### What was wrong?\r\n\r\nhttps://github.com/ethereum/web3.py/issues/1094#issuecomment-428259232 needs to be backported to the v4 branch.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.33\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n #\"eth-testrpc>=0.8.0\",\n #\"ethereum-tester-client>=1.1.0\",\n \"ethtoken\",\n \"py-geth>=1.4.0\",\n \"py-solc>=0.4.0\",\n \"pytest>=2.7.2\",\n \"sphinx\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"urllib3\",\n \"web3>=2.1.0\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.3.0\",\n \"hypothesis>=3.31.2\",\n \"pytest>=3.5.0,<4\",\n \"pytest-mock==1.*\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch==4.*\",\n \"pytest-xdist==1.*\",\n \"tox>=1.8.0\",\n \"tqdm\",\n \"when-changed\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester'] +\n extras_require['linter'] +\n extras_require['docs'] +\n extras_require['dev']\n)\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='4.7.2',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.2.0,<2.0.0\",\n \"eth-account>=0.2.1,<0.4.0\",\n \"eth-utils>=1.2.0,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=6.0.0,<7.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require=extras_require,\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.33\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n 
\"contextlib2>=0.5.4\",\n #\"eth-testrpc>=0.8.0\",\n #\"ethereum-tester-client>=1.1.0\",\n \"ethtoken\",\n \"py-geth>=1.4.0\",\n \"py-solc>=0.4.0\",\n \"pytest>=2.7.2\",\n \"sphinx\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"urllib3\",\n \"web3>=2.1.0\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.3.0\",\n \"hypothesis>=3.31.2\",\n \"pytest>=3.5.0,<4\",\n \"pytest-mock==1.*\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch==4.*\",\n \"pytest-xdist==1.*\",\n \"tox>=1.8.0\",\n \"tqdm\",\n \"when-changed\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester'] +\n extras_require['linter'] +\n extras_require['docs'] +\n extras_require['dev']\n)\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='4.7.2',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.2.0,<2.0.0\",\n \"eth-account>=0.2.1,<0.4.0\",\n \"eth-utils>=1.2.0,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=6.0.0,<7.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5.3,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]} | 1,409 | 109 |
gh_patches_debug_57129 | rasdani/github-patches | git_diff | liqd__adhocracy4-723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add test for module property module_starting_time_left
#720
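For illustration, a rough sketch of what such a test could look like. It assumes the project's usual pytest-factoryboy fixture (`phase_factory`), `freezegun`, and `dateutil` are available in the test suite; those names are assumptions rather than something taken from the repository, and the expected string assumes the English locale since the unit names are lazily translated.

```python
import pytest
from dateutil.parser import parse
from freezegun import freeze_time


@pytest.mark.django_db
def test_module_starting_time_left(phase_factory):
    # The module's first phase starts five minutes after the frozen "now".
    phase = phase_factory(
        start_date=parse('2013-01-01 18:05:00 UTC'),
        end_date=parse('2013-01-01 20:00:00 UTC'),
    )
    module = phase.module
    with freeze_time('2013-01-01 18:00:00 UTC'):
        assert not module.module_has_started
        assert module.module_starting_time_left == '5 minutes'
```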
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `adhocracy4/modules/models.py`
Content:
```
1 import warnings
2
3 from autoslug import AutoSlugField
4 from django.db import models
5 from django.urls import reverse
6 from django.utils import timezone
7 from django.utils.functional import cached_property
8 from django.utils.translation import ugettext_lazy as _
9
10 from adhocracy4.models import base
11 from adhocracy4.projects import models as project_models
12
13
14 class ModulesQuerySet(models.QuerySet):
15
16 def annotate_module_start(self):
17 return self.annotate(module_start=models.Min('phase__start_date'))
18
19 def annotate_module_end(self):
20 return self.annotate(module_end=models.Max('phase__end_date'))
21
22 def running_modules(self):
23 """Return running modules."""
24 now = timezone.now()
25 return self\
26 .filter(is_draft=False)\
27 .annotate(module_start=models.Min('phase__start_date'))\
28 .annotate(module_end=models.Max('phase__end_date'))\
29 .filter(module_start__lte=now, module_end__gt=now)\
30 .order_by('module_start')
31
32 def past_modules(self):
33 """Return past modules ordered by start."""
34 return self\
35 .filter(is_draft=False)\
36 .annotate(module_start=models.Min('phase__start_date'))\
37 .annotate(module_end=models.Max('phase__end_date'))\
38 .filter(module_end__lte=timezone.now())\
39 .order_by('module_start')
40
41 def future_modules(self):
42 """
43 Return future modules ordered by start date.
44
45 Note: Modules without a start date are assumed to start in the future.
46 """
47 return self\
48 .filter(is_draft=False)\
49 .annotate(module_start=models.Min('phase__start_date'))\
50 .filter(models.Q(module_start__gt=timezone.now())
51 | models.Q(module_start=None))\
52 .order_by('module_start')
53
54 def past_and_running_modules(self):
55 """Return past and running modules ordered by start date."""
56 return self\
57 .filter(is_draft=False)\
58 .annotate(module_start=models.Min('phase__start_date'))\
59 .filter(module_start__lte=timezone.now())\
60 .order_by('module_start')
61
62
63 class Module(models.Model):
64 slug = AutoSlugField(populate_from='name', unique=True)
65 name = models.CharField(
66 max_length=512,
67 verbose_name=_('Title of the module'),
68 help_text=_('This title will appear in the timeline and the header on '
69 'the module and project detail pages. It should be '
70 'max. 512 characters long')
71 )
72 description = models.CharField(
73 null=True,
74 blank=True,
75 max_length=512,
76 verbose_name=_('Short description of the module'),
77 help_text=_('This short description will appear on the header of the '
78 'module and project detail pages. It should briefly state '
79 'the goal of the module in max. 512 chars.')
80 )
81 weight = models.PositiveIntegerField()
82 project = models.ForeignKey(
83 project_models.Project, on_delete=models.CASCADE)
84 is_draft = models.BooleanField(default=False)
85
86 objects = ModulesQuerySet.as_manager()
87
88 class Meta:
89 ordering = ['weight']
90
91 def __str__(self):
92 return "{} ({})".format(self.project, self.weight)
93
94 def get_absolute_url(self):
95 return reverse('module-detail', kwargs=dict(module_slug=self.slug))
96
97 @cached_property
98 def get_detail_url(self):
99 """
100 Return either project or module detail url, depending on cluster
101 and timeline logic.
102 """
103 if self.is_in_module_cluster:
104 return self.get_absolute_url()
105 elif self.project.display_timeline:
106 return '{}?initialSlide={}'.format(self.project.get_absolute_url(),
107 self.get_timeline_index)
108 return self.project.get_absolute_url()
109
110 @cached_property
111 def settings_instance(self):
112 settingslist = [field.name for field in self._meta.get_fields()
113 if field.name.endswith('_settings')]
114 for setting in settingslist:
115 if hasattr(self, setting):
116 return getattr(self, setting)
117
118 def has_feature(self, feature, model):
119 for phase in self.phase_set.all():
120 if phase.has_feature(feature, model):
121 return True
122 return False
123
124 # Phase properties to access the modules phases
125 @cached_property
126 def phases(self):
127 '''Return all phases for this module, ordered by weight.'''
128 return self.phase_set.all()
129
130 @cached_property
131 def active_phase(self):
132 '''
133 Return the currently active phase of the module.
134
135 Even though this is not enforced, there should only be one phase
136 active at any given time.
137 '''
138 return self.phase_set \
139 .active_phases() \
140 .first()
141
142 @cached_property
143 def future_phases(self):
144 '''Return all future phases for this module, ordered by start.'''
145 return self.phase_set.future_phases()
146
147 @cached_property
148 def past_phases(self):
149 '''Return all past phases for this module, ordered by start.'''
150 return self.phase_set.past_phases()
151
152 @cached_property
153 def last_active_phase(self):
154 '''
155 Return the phase that is currently still active or the past phase
156 that started last.
157
158 The past phase that started last should also have ended last,
159 because there should only be one phase running at any time.
160 This is the phase that's content is shown in the module view.
161 '''
162 return self.active_phase or self.past_phases.last()
163
164 # module properties combining all phases of self
165 @cached_property
166 def module_start(self):
167 '''Return the start date of the module.'''
168 return self.phase_set.order_by('start_date').first().start_date
169
170 @cached_property
171 def module_end(self):
172 '''Return the end date of the module.'''
173 return self.phase_set.order_by('-end_date').first().end_date
174
175 @cached_property
176 def module_has_started(self):
177 '''Test if the module has already started.'''
178 now = timezone.now()
179 return now >= self.module_start
180
181 @cached_property
182 def module_has_finished(self):
183 '''Test if the module has already finished.'''
184 now = timezone.now()
185 return now > self.module_end
186
187 def seconds_in_units(self, seconds):
188 '''Returns time and unit.'''
189 unit_totals = []
190
191 unit_limits = [
192 ([_('day'), _('days')], 24 * 3600),
193 ([_('hour'), _('hours')], 3600),
194 ([_('minute'), _('minutes')], 60),
195 ([_('second'), _('seconds')], 1)
196 ]
197
198 for unit_name, limit in unit_limits:
199 if seconds >= limit:
200 amount = int(float(seconds) / limit)
201 if amount > 1:
202 unit_totals.append((unit_name[1], amount))
203 else:
204 unit_totals.append((unit_name[0], amount))
205 seconds = seconds - (amount * limit)
206 unit_totals.append((_('seconds'), 0))
207
208 return unit_totals
209
210 @cached_property
211 def module_starting_time_left(self):
212 """
213 Return the time left until the module starts.
214 """
215
216 if not self.module_has_started:
217 now = timezone.now()
218 time_delta = self.module_start - now
219 seconds = time_delta.total_seconds()
220 time_delta_list = self.seconds_in_units(seconds)
221 best_unit = time_delta_list[0]
222 time_delta_str = '{} {}'.format(str(best_unit[1]),
223 str(best_unit[0]))
224 return time_delta_str
225
226 return None
227
228 @cached_property
229 def module_running_time_left(self):
230 """
231 Return the time left of the module if it is currently running.
232 """
233
234 if self.module_has_started and not self.module_has_finished:
235 now = timezone.now()
236 time_delta = self.module_end - now
237 seconds = time_delta.total_seconds()
238 time_delta_list = self.seconds_in_units(seconds)
239 best_unit = time_delta_list[0]
240 time_delta_str = '{} {}'.format(str(best_unit[1]),
241 str(best_unit[0]))
242 return time_delta_str
243
244 return None
245
246 @cached_property
247 def module_running_progress(self):
248 """
249 Return the progress of the module in percent
250 if it is currently running.
251 """
252 if self.module_has_started and not self.module_has_finished:
253 time_gone = timezone.now() - self.module_start
254 total_time = self.module_end - self.module_start
255 return round(time_gone / total_time * 100)
256 return None
257
258 # properties to determine the timeline/cluster logic to enable multiple
259 # modules in one project
260 @cached_property
261 def project_modules(self):
262 """
263 Return published modules of project.
264
265 Used in timeline/cluster logic, so needs to be filtered for
266 unpublished modules.
267 """
268 return self.project.module_set.filter(is_draft=False)
269
270 @cached_property
271 def other_modules(self):
272 """
273 Return all other published modules of project.
274 """
275 return self.project_modules.exclude(id=self.id)
276
277 @cached_property
278 def module_cluster(self):
279 for cluster in self.project.module_clusters:
280 if self in cluster:
281 return cluster
282 return []
283
284 @cached_property
285 def index_in_cluster(self):
286 try:
287 return self.module_cluster.index(self)
288 except IndexError:
289 return None
290
291 @cached_property
292 def readable_index_in_cluster(self):
293 if self.index_in_cluster is not None:
294 return self.index_in_cluster + 1
295
296 @cached_property
297 def is_in_module_cluster(self):
298 return len(self.module_cluster) > 1
299
300 @cached_property
301 def next_module_in_cluster(self):
302 if self.is_in_module_cluster:
303 cluster = self.module_cluster
304 idx = self.index_in_cluster
305 try:
306 return cluster[idx + 1]
307 except IndexError:
308 return None
309
310 @cached_property
311 def previous_module_in_cluster(self):
312 if self.is_in_module_cluster:
313 cluster = self.module_cluster
314 idx = self.index_in_cluster
315 try:
316 if idx > 0:
317 return cluster[idx - 1]
318 except IndexError:
319 return None
320
321 @cached_property
322 def get_timeline_index(self):
323 if self.project.display_timeline:
324 for count, cluster in enumerate(self.project.participation_dates):
325 if 'modules' in cluster and self in cluster['modules']:
326 return count
327 return 0
328
329 # Deprecated properties
330 @cached_property
331 def first_phase_start_date(self):
332 '''
333 Return the start date of the first phase in the module.
334
335 Attention: This method is _deprecated_. The property module_start
336 should be used instead.
337 '''
338 warnings.warn(
339 "first_phase_start_date is deprecated; use module_start.",
340 DeprecationWarning
341 )
342 first_phase = self.phase_set.order_by('start_date').first()
343 return first_phase.start_date
344
345
346 class Item(base.UserGeneratedContentModel):
347 module = models.ForeignKey(Module, on_delete=models.CASCADE)
348
349 @cached_property
350 def project(self):
351 return self.module.project
352
353
354 class AbstractSettings(models.Model):
355 module = models.OneToOneField(Module, on_delete=models.CASCADE,
356 related_name='%(class)s_settings')
357
358 class Meta:
359 abstract = True
360
361 @staticmethod
362 def widgets():
363 return {}
364
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/adhocracy4/modules/models.py b/adhocracy4/modules/models.py
--- a/adhocracy4/modules/models.py
+++ b/adhocracy4/modules/models.py
@@ -285,7 +285,7 @@
def index_in_cluster(self):
try:
return self.module_cluster.index(self)
- except IndexError:
+ except (IndexError, ValueError):
return None
@cached_property
| {"golden_diff": "diff --git a/adhocracy4/modules/models.py b/adhocracy4/modules/models.py\n--- a/adhocracy4/modules/models.py\n+++ b/adhocracy4/modules/models.py\n@@ -285,7 +285,7 @@\n def index_in_cluster(self):\n try:\n return self.module_cluster.index(self)\n- except IndexError:\n+ except (IndexError, ValueError):\n return None\n \n @cached_property\n", "issue": "add test for module property module_starting_time_left\n#720\r\n\n", "before_files": [{"content": "import warnings\n\nfrom autoslug import AutoSlugField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.models import base\nfrom adhocracy4.projects import models as project_models\n\n\nclass ModulesQuerySet(models.QuerySet):\n\n def annotate_module_start(self):\n return self.annotate(module_start=models.Min('phase__start_date'))\n\n def annotate_module_end(self):\n return self.annotate(module_end=models.Max('phase__end_date'))\n\n def running_modules(self):\n \"\"\"Return running modules.\"\"\"\n now = timezone.now()\n return self\\\n .filter(is_draft=False)\\\n .annotate(module_start=models.Min('phase__start_date'))\\\n .annotate(module_end=models.Max('phase__end_date'))\\\n .filter(module_start__lte=now, module_end__gt=now)\\\n .order_by('module_start')\n\n def past_modules(self):\n \"\"\"Return past modules ordered by start.\"\"\"\n return self\\\n .filter(is_draft=False)\\\n .annotate(module_start=models.Min('phase__start_date'))\\\n .annotate(module_end=models.Max('phase__end_date'))\\\n .filter(module_end__lte=timezone.now())\\\n .order_by('module_start')\n\n def future_modules(self):\n \"\"\"\n Return future modules ordered by start date.\n\n Note: Modules without a start date are assumed to start in the future.\n \"\"\"\n return self\\\n .filter(is_draft=False)\\\n .annotate(module_start=models.Min('phase__start_date'))\\\n .filter(models.Q(module_start__gt=timezone.now())\n | models.Q(module_start=None))\\\n .order_by('module_start')\n\n def past_and_running_modules(self):\n \"\"\"Return past and running modules ordered by start date.\"\"\"\n return self\\\n .filter(is_draft=False)\\\n .annotate(module_start=models.Min('phase__start_date'))\\\n .filter(module_start__lte=timezone.now())\\\n .order_by('module_start')\n\n\nclass Module(models.Model):\n slug = AutoSlugField(populate_from='name', unique=True)\n name = models.CharField(\n max_length=512,\n verbose_name=_('Title of the module'),\n help_text=_('This title will appear in the timeline and the header on '\n 'the module and project detail pages. It should be '\n 'max. 512 characters long')\n )\n description = models.CharField(\n null=True,\n blank=True,\n max_length=512,\n verbose_name=_('Short description of the module'),\n help_text=_('This short description will appear on the header of the '\n 'module and project detail pages. It should briefly state '\n 'the goal of the module in max. 
512 chars.')\n )\n weight = models.PositiveIntegerField()\n project = models.ForeignKey(\n project_models.Project, on_delete=models.CASCADE)\n is_draft = models.BooleanField(default=False)\n\n objects = ModulesQuerySet.as_manager()\n\n class Meta:\n ordering = ['weight']\n\n def __str__(self):\n return \"{} ({})\".format(self.project, self.weight)\n\n def get_absolute_url(self):\n return reverse('module-detail', kwargs=dict(module_slug=self.slug))\n\n @cached_property\n def get_detail_url(self):\n \"\"\"\n Return either project or module detail url, depending on cluster\n and timeline logic.\n \"\"\"\n if self.is_in_module_cluster:\n return self.get_absolute_url()\n elif self.project.display_timeline:\n return '{}?initialSlide={}'.format(self.project.get_absolute_url(),\n self.get_timeline_index)\n return self.project.get_absolute_url()\n\n @cached_property\n def settings_instance(self):\n settingslist = [field.name for field in self._meta.get_fields()\n if field.name.endswith('_settings')]\n for setting in settingslist:\n if hasattr(self, setting):\n return getattr(self, setting)\n\n def has_feature(self, feature, model):\n for phase in self.phase_set.all():\n if phase.has_feature(feature, model):\n return True\n return False\n\n # Phase properties to access the modules phases\n @cached_property\n def phases(self):\n '''Return all phases for this module, ordered by weight.'''\n return self.phase_set.all()\n\n @cached_property\n def active_phase(self):\n '''\n Return the currently active phase of the module.\n\n Even though this is not enforced, there should only be one phase\n active at any given time.\n '''\n return self.phase_set \\\n .active_phases() \\\n .first()\n\n @cached_property\n def future_phases(self):\n '''Return all future phases for this module, ordered by start.'''\n return self.phase_set.future_phases()\n\n @cached_property\n def past_phases(self):\n '''Return all past phases for this module, ordered by start.'''\n return self.phase_set.past_phases()\n\n @cached_property\n def last_active_phase(self):\n '''\n Return the phase that is currently still active or the past phase\n that started last.\n\n The past phase that started last should also have ended last,\n because there should only be one phase running at any time.\n This is the phase that's content is shown in the module view.\n '''\n return self.active_phase or self.past_phases.last()\n\n # module properties combining all phases of self\n @cached_property\n def module_start(self):\n '''Return the start date of the module.'''\n return self.phase_set.order_by('start_date').first().start_date\n\n @cached_property\n def module_end(self):\n '''Return the end date of the module.'''\n return self.phase_set.order_by('-end_date').first().end_date\n\n @cached_property\n def module_has_started(self):\n '''Test if the module has already started.'''\n now = timezone.now()\n return now >= self.module_start\n\n @cached_property\n def module_has_finished(self):\n '''Test if the module has already finished.'''\n now = timezone.now()\n return now > self.module_end\n\n def seconds_in_units(self, seconds):\n '''Returns time and unit.'''\n unit_totals = []\n\n unit_limits = [\n ([_('day'), _('days')], 24 * 3600),\n ([_('hour'), _('hours')], 3600),\n ([_('minute'), _('minutes')], 60),\n ([_('second'), _('seconds')], 1)\n ]\n\n for unit_name, limit in unit_limits:\n if seconds >= limit:\n amount = int(float(seconds) / limit)\n if amount > 1:\n unit_totals.append((unit_name[1], amount))\n else:\n unit_totals.append((unit_name[0], 
amount))\n seconds = seconds - (amount * limit)\n unit_totals.append((_('seconds'), 0))\n\n return unit_totals\n\n @cached_property\n def module_starting_time_left(self):\n \"\"\"\n Return the time left until the module starts.\n \"\"\"\n\n if not self.module_has_started:\n now = timezone.now()\n time_delta = self.module_start - now\n seconds = time_delta.total_seconds()\n time_delta_list = self.seconds_in_units(seconds)\n best_unit = time_delta_list[0]\n time_delta_str = '{} {}'.format(str(best_unit[1]),\n str(best_unit[0]))\n return time_delta_str\n\n return None\n\n @cached_property\n def module_running_time_left(self):\n \"\"\"\n Return the time left of the module if it is currently running.\n \"\"\"\n\n if self.module_has_started and not self.module_has_finished:\n now = timezone.now()\n time_delta = self.module_end - now\n seconds = time_delta.total_seconds()\n time_delta_list = self.seconds_in_units(seconds)\n best_unit = time_delta_list[0]\n time_delta_str = '{} {}'.format(str(best_unit[1]),\n str(best_unit[0]))\n return time_delta_str\n\n return None\n\n @cached_property\n def module_running_progress(self):\n \"\"\"\n Return the progress of the module in percent\n if it is currently running.\n \"\"\"\n if self.module_has_started and not self.module_has_finished:\n time_gone = timezone.now() - self.module_start\n total_time = self.module_end - self.module_start\n return round(time_gone / total_time * 100)\n return None\n\n # properties to determine the timeline/cluster logic to enable multiple\n # modules in one project\n @cached_property\n def project_modules(self):\n \"\"\"\n Return published modules of project.\n\n Used in timeline/cluster logic, so needs to be filtered for\n unpublished modules.\n \"\"\"\n return self.project.module_set.filter(is_draft=False)\n\n @cached_property\n def other_modules(self):\n \"\"\"\n Return all other published modules of project.\n \"\"\"\n return self.project_modules.exclude(id=self.id)\n\n @cached_property\n def module_cluster(self):\n for cluster in self.project.module_clusters:\n if self in cluster:\n return cluster\n return []\n\n @cached_property\n def index_in_cluster(self):\n try:\n return self.module_cluster.index(self)\n except IndexError:\n return None\n\n @cached_property\n def readable_index_in_cluster(self):\n if self.index_in_cluster is not None:\n return self.index_in_cluster + 1\n\n @cached_property\n def is_in_module_cluster(self):\n return len(self.module_cluster) > 1\n\n @cached_property\n def next_module_in_cluster(self):\n if self.is_in_module_cluster:\n cluster = self.module_cluster\n idx = self.index_in_cluster\n try:\n return cluster[idx + 1]\n except IndexError:\n return None\n\n @cached_property\n def previous_module_in_cluster(self):\n if self.is_in_module_cluster:\n cluster = self.module_cluster\n idx = self.index_in_cluster\n try:\n if idx > 0:\n return cluster[idx - 1]\n except IndexError:\n return None\n\n @cached_property\n def get_timeline_index(self):\n if self.project.display_timeline:\n for count, cluster in enumerate(self.project.participation_dates):\n if 'modules' in cluster and self in cluster['modules']:\n return count\n return 0\n\n # Deprecated properties\n @cached_property\n def first_phase_start_date(self):\n '''\n Return the start date of the first phase in the module.\n\n Attention: This method is _deprecated_. 
The property module_start\n should be used instead.\n '''\n warnings.warn(\n \"first_phase_start_date is deprecated; use module_start.\",\n DeprecationWarning\n )\n first_phase = self.phase_set.order_by('start_date').first()\n return first_phase.start_date\n\n\nclass Item(base.UserGeneratedContentModel):\n module = models.ForeignKey(Module, on_delete=models.CASCADE)\n\n @cached_property\n def project(self):\n return self.module.project\n\n\nclass AbstractSettings(models.Model):\n module = models.OneToOneField(Module, on_delete=models.CASCADE,\n related_name='%(class)s_settings')\n\n class Meta:\n abstract = True\n\n @staticmethod\n def widgets():\n return {}\n", "path": "adhocracy4/modules/models.py"}], "after_files": [{"content": "import warnings\n\nfrom autoslug import AutoSlugField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.models import base\nfrom adhocracy4.projects import models as project_models\n\n\nclass ModulesQuerySet(models.QuerySet):\n\n def annotate_module_start(self):\n return self.annotate(module_start=models.Min('phase__start_date'))\n\n def annotate_module_end(self):\n return self.annotate(module_end=models.Max('phase__end_date'))\n\n def running_modules(self):\n \"\"\"Return running modules.\"\"\"\n now = timezone.now()\n return self\\\n .filter(is_draft=False)\\\n .annotate(module_start=models.Min('phase__start_date'))\\\n .annotate(module_end=models.Max('phase__end_date'))\\\n .filter(module_start__lte=now, module_end__gt=now)\\\n .order_by('module_start')\n\n def past_modules(self):\n \"\"\"Return past modules ordered by start.\"\"\"\n return self\\\n .filter(is_draft=False)\\\n .annotate(module_start=models.Min('phase__start_date'))\\\n .annotate(module_end=models.Max('phase__end_date'))\\\n .filter(module_end__lte=timezone.now())\\\n .order_by('module_start')\n\n def future_modules(self):\n \"\"\"\n Return future modules ordered by start date.\n\n Note: Modules without a start date are assumed to start in the future.\n \"\"\"\n return self\\\n .filter(is_draft=False)\\\n .annotate(module_start=models.Min('phase__start_date'))\\\n .filter(models.Q(module_start__gt=timezone.now())\n | models.Q(module_start=None))\\\n .order_by('module_start')\n\n def past_and_running_modules(self):\n \"\"\"Return past and running modules ordered by start date.\"\"\"\n return self\\\n .filter(is_draft=False)\\\n .annotate(module_start=models.Min('phase__start_date'))\\\n .filter(module_start__lte=timezone.now())\\\n .order_by('module_start')\n\n\nclass Module(models.Model):\n slug = AutoSlugField(populate_from='name', unique=True)\n name = models.CharField(\n max_length=512,\n verbose_name=_('Title of the module'),\n help_text=_('This title will appear in the timeline and the header on '\n 'the module and project detail pages. It should be '\n 'max. 512 characters long')\n )\n description = models.CharField(\n null=True,\n blank=True,\n max_length=512,\n verbose_name=_('Short description of the module'),\n help_text=_('This short description will appear on the header of the '\n 'module and project detail pages. It should briefly state '\n 'the goal of the module in max. 
512 chars.')\n )\n weight = models.PositiveIntegerField()\n project = models.ForeignKey(\n project_models.Project, on_delete=models.CASCADE)\n is_draft = models.BooleanField(default=False)\n\n objects = ModulesQuerySet.as_manager()\n\n class Meta:\n ordering = ['weight']\n\n def __str__(self):\n return \"{} ({})\".format(self.project, self.weight)\n\n def get_absolute_url(self):\n return reverse('module-detail', kwargs=dict(module_slug=self.slug))\n\n @cached_property\n def get_detail_url(self):\n \"\"\"\n Return either project or module detail url, depending on cluster\n and timeline logic.\n \"\"\"\n if self.is_in_module_cluster:\n return self.get_absolute_url()\n elif self.project.display_timeline:\n return '{}?initialSlide={}'.format(self.project.get_absolute_url(),\n self.get_timeline_index)\n return self.project.get_absolute_url()\n\n @cached_property\n def settings_instance(self):\n settingslist = [field.name for field in self._meta.get_fields()\n if field.name.endswith('_settings')]\n for setting in settingslist:\n if hasattr(self, setting):\n return getattr(self, setting)\n\n def has_feature(self, feature, model):\n for phase in self.phase_set.all():\n if phase.has_feature(feature, model):\n return True\n return False\n\n # Phase properties to access the modules phases\n @cached_property\n def phases(self):\n '''Return all phases for this module, ordered by weight.'''\n return self.phase_set.all()\n\n @cached_property\n def active_phase(self):\n '''\n Return the currently active phase of the module.\n\n Even though this is not enforced, there should only be one phase\n active at any given time.\n '''\n return self.phase_set \\\n .active_phases() \\\n .first()\n\n @cached_property\n def future_phases(self):\n '''Return all future phases for this module, ordered by start.'''\n return self.phase_set.future_phases()\n\n @cached_property\n def past_phases(self):\n '''Return all past phases for this module, ordered by start.'''\n return self.phase_set.past_phases()\n\n @cached_property\n def last_active_phase(self):\n '''\n Return the phase that is currently still active or the past phase\n that started last.\n\n The past phase that started last should also have ended last,\n because there should only be one phase running at any time.\n This is the phase that's content is shown in the module view.\n '''\n return self.active_phase or self.past_phases.last()\n\n # module properties combining all phases of self\n @cached_property\n def module_start(self):\n '''Return the start date of the module.'''\n return self.phase_set.order_by('start_date').first().start_date\n\n @cached_property\n def module_end(self):\n '''Return the end date of the module.'''\n return self.phase_set.order_by('-end_date').first().end_date\n\n @cached_property\n def module_has_started(self):\n '''Test if the module has already started.'''\n now = timezone.now()\n return now >= self.module_start\n\n @cached_property\n def module_has_finished(self):\n '''Test if the module has already finished.'''\n now = timezone.now()\n return now > self.module_end\n\n def seconds_in_units(self, seconds):\n '''Returns time and unit.'''\n unit_totals = []\n\n unit_limits = [\n ([_('day'), _('days')], 24 * 3600),\n ([_('hour'), _('hours')], 3600),\n ([_('minute'), _('minutes')], 60),\n ([_('second'), _('seconds')], 1)\n ]\n\n for unit_name, limit in unit_limits:\n if seconds >= limit:\n amount = int(float(seconds) / limit)\n if amount > 1:\n unit_totals.append((unit_name[1], amount))\n else:\n unit_totals.append((unit_name[0], 
amount))\n seconds = seconds - (amount * limit)\n unit_totals.append((_('seconds'), 0))\n\n return unit_totals\n\n @cached_property\n def module_starting_time_left(self):\n \"\"\"\n Return the time left until the module starts.\n \"\"\"\n\n if not self.module_has_started:\n now = timezone.now()\n time_delta = self.module_start - now\n seconds = time_delta.total_seconds()\n time_delta_list = self.seconds_in_units(seconds)\n best_unit = time_delta_list[0]\n time_delta_str = '{} {}'.format(str(best_unit[1]),\n str(best_unit[0]))\n return time_delta_str\n\n return None\n\n @cached_property\n def module_running_time_left(self):\n \"\"\"\n Return the time left of the module if it is currently running.\n \"\"\"\n\n if self.module_has_started and not self.module_has_finished:\n now = timezone.now()\n time_delta = self.module_end - now\n seconds = time_delta.total_seconds()\n time_delta_list = self.seconds_in_units(seconds)\n best_unit = time_delta_list[0]\n time_delta_str = '{} {}'.format(str(best_unit[1]),\n str(best_unit[0]))\n return time_delta_str\n\n return None\n\n @cached_property\n def module_running_progress(self):\n \"\"\"\n Return the progress of the module in percent\n if it is currently running.\n \"\"\"\n if self.module_has_started and not self.module_has_finished:\n time_gone = timezone.now() - self.module_start\n total_time = self.module_end - self.module_start\n return round(time_gone / total_time * 100)\n return None\n\n # properties to determine the timeline/cluster logic to enable multiple\n # modules in one project\n @cached_property\n def project_modules(self):\n \"\"\"\n Return published modules of project.\n\n Used in timeline/cluster logic, so needs to be filtered for\n unpublished modules.\n \"\"\"\n return self.project.module_set.filter(is_draft=False)\n\n @cached_property\n def other_modules(self):\n \"\"\"\n Return all other published modules of project.\n \"\"\"\n return self.project_modules.exclude(id=self.id)\n\n @cached_property\n def module_cluster(self):\n for cluster in self.project.module_clusters:\n if self in cluster:\n return cluster\n return []\n\n @cached_property\n def index_in_cluster(self):\n try:\n return self.module_cluster.index(self)\n except (IndexError, ValueError):\n return None\n\n @cached_property\n def readable_index_in_cluster(self):\n if self.index_in_cluster is not None:\n return self.index_in_cluster + 1\n\n @cached_property\n def is_in_module_cluster(self):\n return len(self.module_cluster) > 1\n\n @cached_property\n def next_module_in_cluster(self):\n if self.is_in_module_cluster:\n cluster = self.module_cluster\n idx = self.index_in_cluster\n try:\n return cluster[idx + 1]\n except IndexError:\n return None\n\n @cached_property\n def previous_module_in_cluster(self):\n if self.is_in_module_cluster:\n cluster = self.module_cluster\n idx = self.index_in_cluster\n try:\n if idx > 0:\n return cluster[idx - 1]\n except IndexError:\n return None\n\n @cached_property\n def get_timeline_index(self):\n if self.project.display_timeline:\n for count, cluster in enumerate(self.project.participation_dates):\n if 'modules' in cluster and self in cluster['modules']:\n return count\n return 0\n\n # Deprecated properties\n @cached_property\n def first_phase_start_date(self):\n '''\n Return the start date of the first phase in the module.\n\n Attention: This method is _deprecated_. 
The property module_start\n should be used instead.\n '''\n warnings.warn(\n \"first_phase_start_date is deprecated; use module_start.\",\n DeprecationWarning\n )\n first_phase = self.phase_set.order_by('start_date').first()\n return first_phase.start_date\n\n\nclass Item(base.UserGeneratedContentModel):\n module = models.ForeignKey(Module, on_delete=models.CASCADE)\n\n @cached_property\n def project(self):\n return self.module.project\n\n\nclass AbstractSettings(models.Model):\n module = models.OneToOneField(Module, on_delete=models.CASCADE,\n related_name='%(class)s_settings')\n\n class Meta:\n abstract = True\n\n @staticmethod\n def widgets():\n return {}\n", "path": "adhocracy4/modules/models.py"}]} | 3,684 | 95 |
gh_patches_debug_11266 | rasdani/github-patches | git_diff | marshmallow-code__webargs-943 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Testsuite fails under pytest==8.2.0 with `'AsyncHTTPTestCase' has no attribute 'runTest'`
We currently have some test failures in basically all python versions, starting ~2 days ago. At first glance, it looks like an issue with `tornado`'s `AsyncHTTPTestCase`, but `tornado` doesn't have a recent release.
Looking at what projects updated recently, I flagged `pytest` as a good candidate for investigation, and testing with `pytest==8.1.2` works fine. So something related to unittest TestCases changed in 8.2.0 in a way that breaks tornado tests.
For reference, here's one of the error traces:
```
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'AsyncHTTPTestCase' object has no attribute 'runTest'
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'TestApp' object has no attribute 'runTest'
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'TestValidateApp' object has no attribute 'runTest'
```
`runTest` looks like a dynamically created method in unittest, whose name gets passed on init to TestCase.
I started looking at changes to `pytest`'s unittest module ( https://github.com/pytest-dev/pytest/blame/main/src/_pytest/unittest.py ), but I'm out of time for digging into this right now.
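To make the failure mode concrete, here is a self-contained sketch that mimics the wrapping tornado does in `AsyncTestCase.__init__`; the class below is a stand-in, not tornado's real code. `unittest.TestCase.__init__` deliberately tolerates a missing `runTest`, but the unconditional attribute lookup on the method name does not, so anything that constructs the class with the default `methodName` (which pytest 8.2.0 apparently now does during collection) fails exactly like the traces above:

```python
import unittest


class WrappingTestCase(unittest.TestCase):
    """Stand-in for tornado.testing.AsyncTestCase: wraps the named method on init."""

    def __init__(self, methodName='runTest'):
        super().__init__(methodName)  # unittest tolerates a missing 'runTest'
        # ...but this unconditional lookup does not:
        setattr(self, methodName, getattr(self, methodName))


class MyTest(WrappingTestCase):
    def test_something(self):
        pass


MyTest('test_something')   # ok: the named method exists
try:
    MyTest()               # default methodName='runTest'
except AttributeError as exc:
    print(exc)             # 'MyTest' object has no attribute 'runTest'
```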
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/__init__.py`
Content:
```
1 from __future__ import annotations
2
3 import importlib.metadata
4
5 # Make marshmallow's validation functions importable from webargs
6 from marshmallow import validate
7 from marshmallow.utils import missing
8 from packaging.version import Version
9
10 from webargs import fields
11 from webargs.core import ValidationError
12
13 # TODO: Deprecate __version__ et al.
14 __version__ = importlib.metadata.version("webargs")
15 __parsed_version__ = Version(__version__)
16 __version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (
17 __parsed_version__.release
18 ) # type: ignore[assignment]
19 if __parsed_version__.pre:
20 __version_info__ += __parsed_version__.pre # type: ignore[assignment]
21 __all__ = ("ValidationError", "fields", "missing", "validate")
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/webargs/__init__.py b/src/webargs/__init__.py
--- a/src/webargs/__init__.py
+++ b/src/webargs/__init__.py
@@ -14,8 +14,8 @@
__version__ = importlib.metadata.version("webargs")
__parsed_version__ = Version(__version__)
__version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (
- __parsed_version__.release
-) # type: ignore[assignment]
+ __parsed_version__.release # type: ignore[assignment]
+)
if __parsed_version__.pre:
__version_info__ += __parsed_version__.pre # type: ignore[assignment]
__all__ = ("ValidationError", "fields", "missing", "validate")
| {"golden_diff": "diff --git a/src/webargs/__init__.py b/src/webargs/__init__.py\n--- a/src/webargs/__init__.py\n+++ b/src/webargs/__init__.py\n@@ -14,8 +14,8 @@\n __version__ = importlib.metadata.version(\"webargs\")\n __parsed_version__ = Version(__version__)\n __version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (\n- __parsed_version__.release\n-) # type: ignore[assignment]\n+ __parsed_version__.release # type: ignore[assignment]\n+)\n if __parsed_version__.pre:\n __version_info__ += __parsed_version__.pre # type: ignore[assignment]\n __all__ = (\"ValidationError\", \"fields\", \"missing\", \"validate\")\n", "issue": "Testsuite fails under pytest==8.2.0 with `'AsyncHTTPTestCase' has no attribute 'runTest'`\nWe currently have some test failures in basically all python versions, starting ~2 days ago. At first glance, it looks like an issue with `tornado`'s `AsyncHTTPTestCase`, but `tornado` doesn't have a recent release.\r\n\r\nLooking at what projects updated recently, I flagged `pytest` as a good candidate for investigation, and testing with `pytest=8.1.2` works fine. So something related to unittest TestCases changed in 8.2.0 in a way that breaks tornado tests.\r\n\r\nFor reference, here's one of the error traces:\r\n```\r\n____________________ ERROR collecting tests/test_tornadoparser.py ____________________\r\n.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__\r\n setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))\r\nE AttributeError: 'AsyncHTTPTestCase' object has no attribute 'runTest'\r\n____________________ ERROR collecting tests/test_tornadoparser.py ____________________\r\n.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__\r\n setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))\r\nE AttributeError: 'TestApp' object has no attribute 'runTest'\r\n____________________ ERROR collecting tests/test_tornadoparser.py ____________________\r\n.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__\r\n setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))\r\nE AttributeError: 'TestValidateApp' object has no attribute 'runTest'\r\n```\r\n\r\n`runTest` looks like a dynamically created method in unittest, whose name gets passed on init to TestCase.\r\nI started looking at changes to `pytest`'s unittest module ( https://github.com/pytest-dev/pytest/blame/main/src/_pytest/unittest.py ), but I'm out of time for digging into this right now.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport importlib.metadata\n\n# Make marshmallow's validation functions importable from webargs\nfrom marshmallow import validate\nfrom marshmallow.utils import missing\nfrom packaging.version import Version\n\nfrom webargs import fields\nfrom webargs.core import ValidationError\n\n# TODO: Deprecate __version__ et al.\n__version__ = importlib.metadata.version(\"webargs\")\n__parsed_version__ = Version(__version__)\n__version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (\n __parsed_version__.release\n) # type: ignore[assignment]\nif __parsed_version__.pre:\n __version_info__ += __parsed_version__.pre # type: ignore[assignment]\n__all__ = (\"ValidationError\", \"fields\", \"missing\", \"validate\")\n", "path": "src/webargs/__init__.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport importlib.metadata\n\n# Make marshmallow's validation functions importable from webargs\nfrom marshmallow 
import validate\nfrom marshmallow.utils import missing\nfrom packaging.version import Version\n\nfrom webargs import fields\nfrom webargs.core import ValidationError\n\n# TODO: Deprecate __version__ et al.\n__version__ = importlib.metadata.version(\"webargs\")\n__parsed_version__ = Version(__version__)\n__version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (\n __parsed_version__.release # type: ignore[assignment]\n)\nif __parsed_version__.pre:\n __version_info__ += __parsed_version__.pre # type: ignore[assignment]\n__all__ = (\"ValidationError\", \"fields\", \"missing\", \"validate\")\n", "path": "src/webargs/__init__.py"}]} | 907 | 174 |
gh_patches_debug_7640 | rasdani/github-patches | git_diff | mlflow__mlflow-3242 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in loading spark ml models from gcs_artifact_storage [BUG]
Thank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) for additional information about bug reports. For help with debugging your code, please refer to [Stack Overflow](https://stackoverflow.com/questions/tagged/mlflow).
**Please fill in this bug report template to ensure a timely and thorough response.**
### Willingness to contribute
The MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?
- [x] Yes. I can contribute a fix for this bug independently.
- [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.
- [ ] No. I cannot contribute a bug fix at this time.
### System information
- **MLflow version (run ``mlflow --version``)**: 1.9.1
- **Python version**: 3.6.10
### Describe the problem
While loading/fetching sparkml models/artifacts from a GCS bucket, the error below is raised because `client.list_artifacts(run_id, artifact_relative_path)` returns the **<FileInfo: file_size=0, is_dir=False, path=artifact_relative_path +'/'>**, whose path does not exist, along with the other FileInfos. Hence it raises a 404 file-not-found error.
### Code to reproduce the issue
`mlflow.spark.load_model(gcs_path_to_model)`
or
`client.download_artifacts(run_id, sparkml_model_relative_gcs_path)`
### Other info / logs
Complete error logs
2020/07/07 13:49:25 INFO mlflow.spark: URI 'gs://gcs-datascience-adhoc-us/ashwar/ecf191ff5c864bc99b75e531b31c3e4a/artifacts/spark-model/sparkml' does not point to the current DFS.
2020/07/07 13:49:25 INFO mlflow.spark: File 'gs://gcs-datascience-adhoc-us/ashwar/ecf191ff5c864bc99b75e531b31c3e4a/artifacts/spark-model/sparkml' not found on DFS. Will attempt to upload the file.
Traceback (most recent call last):
File "/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py", line 951, in download_to_file
transport, file_obj, download_url, headers, start, end, raw_download
File "/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py", line 834, in _do_download
download.consume(transport)
File "/opt/conda/default/lib/python3.6/site-packages/google/resumable_media/requests/download.py", line 153, in consume
self._process_response(result)
File "/opt/conda/default/lib/python3.6/site-packages/google/resumable_media/_download.py", line 171, in _process_response
response, _ACCEPTABLE_STATUS_CODES, self._get_status_code
File "/opt/conda/default/lib/python3.6/site-packages/google/resumable_media/_helpers.py", line 96, in require_status_code
*status_codes
google.resumable_media.common.InvalidResponse: ('Request failed with status code', 404, 'Expected one of', <HTTPStatus.OK: 200>, <HTTPStatus.PARTIAL_CONTENT: 206>)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ashwar_gupta_zeotap_com/pyspark/predict.py", line 29, in <module>
model = mlflow.spark.load_model(model_production_uri, "hdfs:/user/mlflow/temp")
File "/opt/conda/default/lib/python3.6/site-packages/mlflow/spark.py", line 501, in load_model
return _load_model(model_uri=model_uri, dfs_tmpdir=dfs_tmpdir)
File "/opt/conda/default/lib/python3.6/site-packages/mlflow/spark.py", line 452, in _load_model
model_path = _HadoopFileSystem.maybe_copy_from_uri(model_uri, tmp_path)
File "/opt/conda/default/lib/python3.6/site-packages/mlflow/spark.py", line 302, in maybe_copy_from_uri
return cls.maybe_copy_from_local_file(_download_artifact_from_uri(src_uri), dst_path)
File "/opt/conda/default/lib/python3.6/site-packages/mlflow/tracking/artifact_utils.py", line 74, in _download_artifact_from_uri
artifact_path=artifact_path, dst_path=output_path)
File "/opt/conda/default/lib/python3.6/site-packages/mlflow/store/artifact/artifact_repo.py", line 128, in download_artifacts
return download_artifact_dir(artifact_path)
File "/opt/conda/default/lib/python3.6/site-packages/mlflow/store/artifact/artifact_repo.py", line 106, in download_artifact_dir
download_file(file_info.path)
File "/opt/conda/default/lib/python3.6/site-packages/mlflow/store/artifact/artifact_repo.py", line 90, in download_file
self._download_file(remote_file_path=fullpath, local_path=local_file_path)
File "/opt/conda/default/lib/python3.6/site-packages/mlflow/store/artifact/gcs_artifact_repo.py", line 105, in _download_file
gcs_bucket.blob(remote_full_path).download_to_filename(local_path)
File "/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py", line 1024, in download_to_filename
if_metageneration_not_match=if_metageneration_not_match,
File "/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py", line 954, in download_to_file
_raise_from_invalid_response(exc)
File "/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py", line 3091, in _raise_from_invalid_response
raise exceptions.from_http_status(response.status_code, message, response=response)
google.api_core.exceptions.NotFound: 404 GET https://storage.googleapis.com/download/storage/v1/b/gcs-datascience-adhoc-us/o/ashwar%2Fecf191ff5c864bc99b75e531b31c3e4a%2Fartifacts%2Fspark-model%2Fsparkml?alt=media: ('Request failed with status code', 404, 'Expected one of', <HTTPStatus.OK: 200>, <HTTPStatus.PARTIAL_CONTENT: 206>)
20/07/07 13:49:25 INFO org.spark_project.jetty.server.AbstractConnector: Stopped Spark@5b7269db{HTTP/1.1,[http/1.1]}{0.0.0.0:4040}
**PS: I have found the fix and tested the same.**
### What component(s), interfaces, languages, and integrations does this bug affect?
Components
- [x] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [x] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/store/artifact/gcs_artifact_repo.py`
Content:
```
1 import os
2
3 import posixpath
4 from six.moves import urllib
5
6 from mlflow.entities import FileInfo
7 from mlflow.store.artifact.artifact_repo import ArtifactRepository
8 from mlflow.utils.file_utils import relative_path_to_artifact_path
9 from mlflow.exceptions import MlflowException
10
11
12 class GCSArtifactRepository(ArtifactRepository):
13 """
14 Stores artifacts on Google Cloud Storage.
15
16 Assumes the google credentials are available in the environment,
17 see https://google-cloud.readthedocs.io/en/latest/core/auth.html.
18 """
19
20 def __init__(self, artifact_uri, client=None):
21 if client:
22 self.gcs = client
23 else:
24 from google.cloud import storage as gcs_storage
25
26 self.gcs = gcs_storage
27 super(GCSArtifactRepository, self).__init__(artifact_uri)
28
29 @staticmethod
30 def parse_gcs_uri(uri):
31 """Parse an GCS URI, returning (bucket, path)"""
32 parsed = urllib.parse.urlparse(uri)
33 if parsed.scheme != "gs":
34 raise Exception("Not a GCS URI: %s" % uri)
35 path = parsed.path
36 if path.startswith("/"):
37 path = path[1:]
38 return parsed.netloc, path
39
40 def _get_bucket(self, bucket):
41 from google.auth.exceptions import DefaultCredentialsError
42
43 try:
44 storage_client = self.gcs.Client()
45 except DefaultCredentialsError:
46 storage_client = self.gcs.Client.create_anonymous_client()
47 return storage_client.bucket(bucket)
48
49 def log_artifact(self, local_file, artifact_path=None):
50 (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)
51 if artifact_path:
52 dest_path = posixpath.join(dest_path, artifact_path)
53 dest_path = posixpath.join(dest_path, os.path.basename(local_file))
54
55 gcs_bucket = self._get_bucket(bucket)
56 blob = gcs_bucket.blob(dest_path)
57 blob.upload_from_filename(local_file)
58
59 def log_artifacts(self, local_dir, artifact_path=None):
60 (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)
61 if artifact_path:
62 dest_path = posixpath.join(dest_path, artifact_path)
63 gcs_bucket = self._get_bucket(bucket)
64
65 local_dir = os.path.abspath(local_dir)
66 for (root, _, filenames) in os.walk(local_dir):
67 upload_path = dest_path
68 if root != local_dir:
69 rel_path = os.path.relpath(root, local_dir)
70 rel_path = relative_path_to_artifact_path(rel_path)
71 upload_path = posixpath.join(dest_path, rel_path)
72 for f in filenames:
73 path = posixpath.join(upload_path, f)
74 gcs_bucket.blob(path).upload_from_filename(os.path.join(root, f))
75
76 def list_artifacts(self, path=None):
77 (bucket, artifact_path) = self.parse_gcs_uri(self.artifact_uri)
78 dest_path = artifact_path
79 if path:
80 dest_path = posixpath.join(dest_path, path)
81 prefix = dest_path + "/"
82
83 bkt = self._get_bucket(bucket)
84
85 infos = self._list_folders(bkt, prefix, artifact_path)
86
87 results = bkt.list_blobs(prefix=prefix, delimiter="/")
88 for result in results:
89 blob_path = result.name[len(artifact_path) + 1 :]
90 infos.append(FileInfo(blob_path, False, result.size))
91
92 return sorted(infos, key=lambda f: f.path)
93
94 def _list_folders(self, bkt, prefix, artifact_path):
95 results = bkt.list_blobs(prefix=prefix, delimiter="/")
96 dir_paths = set()
97 for page in results.pages:
98 dir_paths.update(page.prefixes)
99
100 return [FileInfo(path[len(artifact_path) + 1 : -1], True, None) for path in dir_paths]
101
102 def _download_file(self, remote_file_path, local_path):
103 (bucket, remote_root_path) = self.parse_gcs_uri(self.artifact_uri)
104 remote_full_path = posixpath.join(remote_root_path, remote_file_path)
105 gcs_bucket = self._get_bucket(bucket)
106 gcs_bucket.blob(remote_full_path).download_to_filename(local_path)
107
108 def delete_artifacts(self, artifact_path=None):
109 raise MlflowException("Not implemented yet")
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/store/artifact/gcs_artifact_repo.py b/mlflow/store/artifact/gcs_artifact_repo.py
--- a/mlflow/store/artifact/gcs_artifact_repo.py
+++ b/mlflow/store/artifact/gcs_artifact_repo.py
@@ -86,6 +86,10 @@
results = bkt.list_blobs(prefix=prefix, delimiter="/")
for result in results:
+ # skip blobs matching current directory path as list_blobs api
+ # returns subdirectories as well
+ if result.name == prefix:
+ continue
blob_path = result.name[len(artifact_path) + 1 :]
infos.append(FileInfo(blob_path, False, result.size))
| {"golden_diff": "diff --git a/mlflow/store/artifact/gcs_artifact_repo.py b/mlflow/store/artifact/gcs_artifact_repo.py\n--- a/mlflow/store/artifact/gcs_artifact_repo.py\n+++ b/mlflow/store/artifact/gcs_artifact_repo.py\n@@ -86,6 +86,10 @@\n \n results = bkt.list_blobs(prefix=prefix, delimiter=\"/\")\n for result in results:\n+ # skip blobs matching current directory path as list_blobs api\n+ # returns subdirectories as well\n+ if result.name == prefix:\n+ continue\n blob_path = result.name[len(artifact_path) + 1 :]\n infos.append(FileInfo(blob_path, False, result.size))\n", "issue": "Error in loading spark ml models from gcs_artifact_storage [BUG] \nThank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) for additional information about bug reports. For help with debugging your code, please refer to [Stack Overflow](https://stackoverflow.com/questions/tagged/mlflow).\r\n\r\n**Please fill in this bug report template to ensure a timely and thorough response.**\r\n\r\n### Willingness to contribute\r\nThe MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?\r\n\r\n- [x] Yes. I can contribute a fix for this bug independently.\r\n- [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.\r\n- [ ] No. I cannot contribute a bug fix at this time.\r\n\r\n### System information\r\n- **MLflow version (run ``mlflow --version``)**: 1.9.1\r\n- **Python version**: 3.6.10\r\n\r\n\r\n### Describe the problem\r\nWhile loading/fetching sparkml models/artifacts from GCS bucket, raises the below error because `client.list_artifacts(run_id, artifact_relative_path)` returns the **<FileInfo: file_size=0, is_dir=False, path=artifact_relative_path +'/'>** which(path) does not exist along with other FileInfos. Hence it sends 404 error for file not found.\r\n\r\n### Code to reproduce the issue\r\n`mlflow.spark.load_model(gcs_path_to_model)`\r\nor \r\n`client.download_artifacts(run_id, sparkml_model_relative_gcs_path)`\r\n\r\n\r\n### Other info / logs\r\nComplete error logs\r\n2020/07/07 13:49:25 INFO mlflow.spark: URI 'gs://gcs-datascience-adhoc-us/ashwar/ecf191ff5c864bc99b75e531b31c3e4a/artifacts/spark-model/sparkml' does not point to the current DFS.\r\n2020/07/07 13:49:25 INFO mlflow.spark: File 'gs://gcs-datascience-adhoc-us/ashwar/ecf191ff5c864bc99b75e531b31c3e4a/artifacts/spark-model/sparkml' not found on DFS. 
Will attempt to upload the file.\r\nTraceback (most recent call last):\r\n File \"/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py\", line 951, in download_to_file\r\n transport, file_obj, download_url, headers, start, end, raw_download\r\n File \"/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py\", line 834, in _do_download\r\n download.consume(transport)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/google/resumable_media/requests/download.py\", line 153, in consume\r\n self._process_response(result)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/google/resumable_media/_download.py\", line 171, in _process_response\r\n response, _ACCEPTABLE_STATUS_CODES, self._get_status_code\r\n File \"/opt/conda/default/lib/python3.6/site-packages/google/resumable_media/_helpers.py\", line 96, in require_status_code\r\n *status_codes\r\ngoogle.resumable_media.common.InvalidResponse: ('Request failed with status code', 404, 'Expected one of', <HTTPStatus.OK: 200>, <HTTPStatus.PARTIAL_CONTENT: 206>)\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"/home/ashwar_gupta_zeotap_com/pyspark/predict.py\", line 29, in <module>\r\n model = mlflow.spark.load_model(model_production_uri, \"hdfs:/user/mlflow/temp\")\r\n File \"/opt/conda/default/lib/python3.6/site-packages/mlflow/spark.py\", line 501, in load_model\r\n return _load_model(model_uri=model_uri, dfs_tmpdir=dfs_tmpdir)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/mlflow/spark.py\", line 452, in _load_model\r\n model_path = _HadoopFileSystem.maybe_copy_from_uri(model_uri, tmp_path)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/mlflow/spark.py\", line 302, in maybe_copy_from_uri\r\n return cls.maybe_copy_from_local_file(_download_artifact_from_uri(src_uri), dst_path)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/mlflow/tracking/artifact_utils.py\", line 74, in _download_artifact_from_uri\r\n artifact_path=artifact_path, dst_path=output_path)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/mlflow/store/artifact/artifact_repo.py\", line 128, in download_artifacts\r\n return download_artifact_dir(artifact_path)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/mlflow/store/artifact/artifact_repo.py\", line 106, in download_artifact_dir\r\n download_file(file_info.path)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/mlflow/store/artifact/artifact_repo.py\", line 90, in download_file\r\n self._download_file(remote_file_path=fullpath, local_path=local_file_path)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/mlflow/store/artifact/gcs_artifact_repo.py\", line 105, in _download_file\r\n gcs_bucket.blob(remote_full_path).download_to_filename(local_path)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py\", line 1024, in download_to_filename\r\n if_metageneration_not_match=if_metageneration_not_match,\r\n File \"/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py\", line 954, in download_to_file\r\n _raise_from_invalid_response(exc)\r\n File \"/opt/conda/default/lib/python3.6/site-packages/google/cloud/storage/blob.py\", line 3091, in _raise_from_invalid_response\r\n raise exceptions.from_http_status(response.status_code, message, response=response)\r\ngoogle.api_core.exceptions.NotFound: 404 GET 
https://storage.googleapis.com/download/storage/v1/b/gcs-datascience-adhoc-us/o/ashwar%2Fecf191ff5c864bc99b75e531b31c3e4a%2Fartifacts%2Fspark-model%2Fsparkml?alt=media: ('Request failed with status code', 404, 'Expected one of', <HTTPStatus.OK: 200>, <HTTPStatus.PARTIAL_CONTENT: 206>)\r\n20/07/07 13:49:25 INFO org.spark_project.jetty.server.AbstractConnector: Stopped Spark@5b7269db{HTTP/1.1,[http/1.1]}{0.0.0.0:4040}\r\n\r\n**PS: I have found the fix and tested the same.** \r\n\r\n\r\n### What component(s), interfaces, languages, and integrations does this bug affect?\r\nComponents \r\n- [x] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [x] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\n\r\n\n", "before_files": [{"content": "import os\n\nimport posixpath\nfrom six.moves import urllib\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import relative_path_to_artifact_path\nfrom mlflow.exceptions import MlflowException\n\n\nclass GCSArtifactRepository(ArtifactRepository):\n \"\"\"\n Stores artifacts on Google Cloud Storage.\n\n Assumes the google credentials are available in the environment,\n see https://google-cloud.readthedocs.io/en/latest/core/auth.html.\n \"\"\"\n\n def __init__(self, artifact_uri, client=None):\n if client:\n self.gcs = client\n else:\n from google.cloud import storage as gcs_storage\n\n self.gcs = gcs_storage\n super(GCSArtifactRepository, self).__init__(artifact_uri)\n\n @staticmethod\n def parse_gcs_uri(uri):\n \"\"\"Parse an GCS URI, returning (bucket, path)\"\"\"\n parsed = urllib.parse.urlparse(uri)\n if parsed.scheme != \"gs\":\n raise Exception(\"Not a GCS URI: %s\" % uri)\n path = parsed.path\n if path.startswith(\"/\"):\n path = path[1:]\n return parsed.netloc, path\n\n def _get_bucket(self, bucket):\n from google.auth.exceptions import DefaultCredentialsError\n\n try:\n storage_client = self.gcs.Client()\n except DefaultCredentialsError:\n storage_client = self.gcs.Client.create_anonymous_client()\n return storage_client.bucket(bucket)\n\n def log_artifact(self, local_file, artifact_path=None):\n (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n dest_path = posixpath.join(dest_path, os.path.basename(local_file))\n\n gcs_bucket = self._get_bucket(bucket)\n blob = gcs_bucket.blob(dest_path)\n blob.upload_from_filename(local_file)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)\n if artifact_path:\n dest_path = 
posixpath.join(dest_path, artifact_path)\n gcs_bucket = self._get_bucket(bucket)\n\n local_dir = os.path.abspath(local_dir)\n for (root, _, filenames) in os.walk(local_dir):\n upload_path = dest_path\n if root != local_dir:\n rel_path = os.path.relpath(root, local_dir)\n rel_path = relative_path_to_artifact_path(rel_path)\n upload_path = posixpath.join(dest_path, rel_path)\n for f in filenames:\n path = posixpath.join(upload_path, f)\n gcs_bucket.blob(path).upload_from_filename(os.path.join(root, f))\n\n def list_artifacts(self, path=None):\n (bucket, artifact_path) = self.parse_gcs_uri(self.artifact_uri)\n dest_path = artifact_path\n if path:\n dest_path = posixpath.join(dest_path, path)\n prefix = dest_path + \"/\"\n\n bkt = self._get_bucket(bucket)\n\n infos = self._list_folders(bkt, prefix, artifact_path)\n\n results = bkt.list_blobs(prefix=prefix, delimiter=\"/\")\n for result in results:\n blob_path = result.name[len(artifact_path) + 1 :]\n infos.append(FileInfo(blob_path, False, result.size))\n\n return sorted(infos, key=lambda f: f.path)\n\n def _list_folders(self, bkt, prefix, artifact_path):\n results = bkt.list_blobs(prefix=prefix, delimiter=\"/\")\n dir_paths = set()\n for page in results.pages:\n dir_paths.update(page.prefixes)\n\n return [FileInfo(path[len(artifact_path) + 1 : -1], True, None) for path in dir_paths]\n\n def _download_file(self, remote_file_path, local_path):\n (bucket, remote_root_path) = self.parse_gcs_uri(self.artifact_uri)\n remote_full_path = posixpath.join(remote_root_path, remote_file_path)\n gcs_bucket = self._get_bucket(bucket)\n gcs_bucket.blob(remote_full_path).download_to_filename(local_path)\n\n def delete_artifacts(self, artifact_path=None):\n raise MlflowException(\"Not implemented yet\")\n", "path": "mlflow/store/artifact/gcs_artifact_repo.py"}], "after_files": [{"content": "import os\n\nimport posixpath\nfrom six.moves import urllib\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import relative_path_to_artifact_path\nfrom mlflow.exceptions import MlflowException\n\n\nclass GCSArtifactRepository(ArtifactRepository):\n \"\"\"\n Stores artifacts on Google Cloud Storage.\n\n Assumes the google credentials are available in the environment,\n see https://google-cloud.readthedocs.io/en/latest/core/auth.html.\n \"\"\"\n\n def __init__(self, artifact_uri, client=None):\n if client:\n self.gcs = client\n else:\n from google.cloud import storage as gcs_storage\n\n self.gcs = gcs_storage\n super(GCSArtifactRepository, self).__init__(artifact_uri)\n\n @staticmethod\n def parse_gcs_uri(uri):\n \"\"\"Parse an GCS URI, returning (bucket, path)\"\"\"\n parsed = urllib.parse.urlparse(uri)\n if parsed.scheme != \"gs\":\n raise Exception(\"Not a GCS URI: %s\" % uri)\n path = parsed.path\n if path.startswith(\"/\"):\n path = path[1:]\n return parsed.netloc, path\n\n def _get_bucket(self, bucket):\n from google.auth.exceptions import DefaultCredentialsError\n\n try:\n storage_client = self.gcs.Client()\n except DefaultCredentialsError:\n storage_client = self.gcs.Client.create_anonymous_client()\n return storage_client.bucket(bucket)\n\n def log_artifact(self, local_file, artifact_path=None):\n (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n dest_path = posixpath.join(dest_path, os.path.basename(local_file))\n\n gcs_bucket = self._get_bucket(bucket)\n blob = gcs_bucket.blob(dest_path)\n 
blob.upload_from_filename(local_file)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n gcs_bucket = self._get_bucket(bucket)\n\n local_dir = os.path.abspath(local_dir)\n for (root, _, filenames) in os.walk(local_dir):\n upload_path = dest_path\n if root != local_dir:\n rel_path = os.path.relpath(root, local_dir)\n rel_path = relative_path_to_artifact_path(rel_path)\n upload_path = posixpath.join(dest_path, rel_path)\n for f in filenames:\n path = posixpath.join(upload_path, f)\n gcs_bucket.blob(path).upload_from_filename(os.path.join(root, f))\n\n def list_artifacts(self, path=None):\n (bucket, artifact_path) = self.parse_gcs_uri(self.artifact_uri)\n dest_path = artifact_path\n if path:\n dest_path = posixpath.join(dest_path, path)\n prefix = dest_path + \"/\"\n\n bkt = self._get_bucket(bucket)\n\n infos = self._list_folders(bkt, prefix, artifact_path)\n\n results = bkt.list_blobs(prefix=prefix, delimiter=\"/\")\n for result in results:\n # skip blobs matching current directory path as list_blobs api\n # returns subdirectories as well\n if result.name == prefix:\n continue\n blob_path = result.name[len(artifact_path) + 1 :]\n infos.append(FileInfo(blob_path, False, result.size))\n\n return sorted(infos, key=lambda f: f.path)\n\n def _list_folders(self, bkt, prefix, artifact_path):\n results = bkt.list_blobs(prefix=prefix, delimiter=\"/\")\n dir_paths = set()\n for page in results.pages:\n dir_paths.update(page.prefixes)\n\n return [FileInfo(path[len(artifact_path) + 1 : -1], True, None) for path in dir_paths]\n\n def _download_file(self, remote_file_path, local_path):\n (bucket, remote_root_path) = self.parse_gcs_uri(self.artifact_uri)\n remote_full_path = posixpath.join(remote_root_path, remote_file_path)\n gcs_bucket = self._get_bucket(bucket)\n gcs_bucket.blob(remote_full_path).download_to_filename(local_path)\n\n def delete_artifacts(self, artifact_path=None):\n raise MlflowException(\"Not implemented yet\")\n", "path": "mlflow/store/artifact/gcs_artifact_repo.py"}]} | 3,288 | 155 |
gh_patches_debug_40516 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-11747 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request]: [batch processing] file compression ratio is not controlled, can you add control to it?
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What would your feature do ?
When we are in img2img batch processing, the output file format defaults to a highly compressed jpg;
at a resolution of 896x1200 it is only 45k, which is great,
but a lot of the time we don't want to lose image quality to save storage space.
Although I have set [save jpeg picture quality] to 100% in [settings] [image save settings],
it didn't work and I can't control it.
Is there any way I can improve the quality of the picture myself?
### Proposed workflow
1. Go to [settings] -- [image save settings].
2. Set [save jpeg picture quality] to 100%.
3. In [img2img] -- [batch processing], the quality of the output picture will then be controlled.
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/img2img.py`
Content:
```
1 import os
2 from contextlib import closing
3 from pathlib import Path
4
5 import numpy as np
6 from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError
7 import gradio as gr
8
9 from modules import sd_samplers, images as imgutil
10 from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
11 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
12 from modules.shared import opts, state
13 import modules.shared as shared
14 import modules.processing as processing
15 from modules.ui import plaintext_to_html
16 import modules.scripts
17
18
19 def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
20 processing.fix_seed(p)
21
22 images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp")))
23
24 is_inpaint_batch = False
25 if inpaint_mask_dir:
26 inpaint_masks = shared.listfiles(inpaint_mask_dir)
27 is_inpaint_batch = bool(inpaint_masks)
28
29 if is_inpaint_batch:
30 print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
31
32 print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
33
34 save_normally = output_dir == ''
35
36 p.do_not_save_grid = True
37 p.do_not_save_samples = not save_normally
38
39 state.job_count = len(images) * p.n_iter
40
41 # extract "default" params to use in case getting png info fails
42 prompt = p.prompt
43 negative_prompt = p.negative_prompt
44 seed = p.seed
45 cfg_scale = p.cfg_scale
46 sampler_name = p.sampler_name
47 steps = p.steps
48
49 for i, image in enumerate(images):
50 state.job = f"{i+1} out of {len(images)}"
51 if state.skipped:
52 state.skipped = False
53
54 if state.interrupted:
55 break
56
57 try:
58 img = Image.open(image)
59 except UnidentifiedImageError as e:
60 print(e)
61 continue
62 # Use the EXIF orientation of photos taken by smartphones.
63 img = ImageOps.exif_transpose(img)
64
65 if to_scale:
66 p.width = int(img.width * scale_by)
67 p.height = int(img.height * scale_by)
68
69 p.init_images = [img] * p.batch_size
70
71 image_path = Path(image)
72 if is_inpaint_batch:
73 # try to find corresponding mask for an image using simple filename matching
74 if len(inpaint_masks) == 1:
75 mask_image_path = inpaint_masks[0]
76 else:
77 # try to find corresponding mask for an image using simple filename matching
78 mask_image_dir = Path(inpaint_mask_dir)
79 masks_found = list(mask_image_dir.glob(f"{image_path.stem}.*"))
80
81 if len(masks_found) == 0:
82 print(f"Warning: mask is not found for {image_path} in {mask_image_dir}. Skipping it.")
83 continue
84
85 # it should contain only 1 matching mask
86 # otherwise user has many masks with the same name but different extensions
87 mask_image_path = masks_found[0]
88
89 mask_image = Image.open(mask_image_path)
90 p.image_mask = mask_image
91
92 if use_png_info:
93 try:
94 info_img = img
95 if png_info_dir:
96 info_img_path = os.path.join(png_info_dir, os.path.basename(image))
97 info_img = Image.open(info_img_path)
98 geninfo, _ = imgutil.read_info_from_image(info_img)
99 parsed_parameters = parse_generation_parameters(geninfo)
100 parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}
101 except Exception:
102 parsed_parameters = {}
103
104 p.prompt = prompt + (" " + parsed_parameters["Prompt"] if "Prompt" in parsed_parameters else "")
105 p.negative_prompt = negative_prompt + (" " + parsed_parameters["Negative prompt"] if "Negative prompt" in parsed_parameters else "")
106 p.seed = int(parsed_parameters.get("Seed", seed))
107 p.cfg_scale = float(parsed_parameters.get("CFG scale", cfg_scale))
108 p.sampler_name = parsed_parameters.get("Sampler", sampler_name)
109 p.steps = int(parsed_parameters.get("Steps", steps))
110
111 proc = modules.scripts.scripts_img2img.run(p, *args)
112 if proc is None:
113 proc = process_images(p)
114
115 for n, processed_image in enumerate(proc.images):
116 filename = image_path.name
117 relpath = os.path.dirname(os.path.relpath(image, input_dir))
118
119 if n > 0:
120 left, right = os.path.splitext(filename)
121 filename = f"{left}-{n}{right}"
122
123 if not save_normally:
124 os.makedirs(os.path.join(output_dir, relpath), exist_ok=True)
125 if processed_image.mode == 'RGBA':
126 processed_image = processed_image.convert("RGB")
127 processed_image.save(os.path.join(output_dir, relpath, filename))
128
129
130 def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
131 override_settings = create_override_settings_dict(override_settings_texts)
132
133 is_batch = mode == 5
134
135 if mode == 0: # img2img
136 image = init_img.convert("RGB")
137 mask = None
138 elif mode == 1: # img2img sketch
139 image = sketch.convert("RGB")
140 mask = None
141 elif mode == 2: # inpaint
142 image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
143 alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
144 mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')
145 mask = ImageChops.lighter(alpha_mask, mask).convert('L')
146 image = image.convert("RGB")
147 elif mode == 3: # inpaint sketch
148 image = inpaint_color_sketch
149 orig = inpaint_color_sketch_orig or inpaint_color_sketch
150 pred = np.any(np.array(image) != np.array(orig), axis=-1)
151 mask = Image.fromarray(pred.astype(np.uint8) * 255, "L")
152 mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
153 blur = ImageFilter.GaussianBlur(mask_blur)
154 image = Image.composite(image.filter(blur), orig, mask.filter(blur))
155 image = image.convert("RGB")
156 elif mode == 4: # inpaint upload mask
157 image = init_img_inpaint
158 mask = init_mask_inpaint
159 else:
160 image = None
161 mask = None
162
163 # Use the EXIF orientation of photos taken by smartphones.
164 if image is not None:
165 image = ImageOps.exif_transpose(image)
166
167 if selected_scale_tab == 1 and not is_batch:
168 assert image, "Can't scale by because no image is selected"
169
170 width = int(image.width * scale_by)
171 height = int(image.height * scale_by)
172
173 assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
174
175 p = StableDiffusionProcessingImg2Img(
176 sd_model=shared.sd_model,
177 outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,
178 outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
179 prompt=prompt,
180 negative_prompt=negative_prompt,
181 styles=prompt_styles,
182 seed=seed,
183 subseed=subseed,
184 subseed_strength=subseed_strength,
185 seed_resize_from_h=seed_resize_from_h,
186 seed_resize_from_w=seed_resize_from_w,
187 seed_enable_extras=seed_enable_extras,
188 sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,
189 batch_size=batch_size,
190 n_iter=n_iter,
191 steps=steps,
192 cfg_scale=cfg_scale,
193 width=width,
194 height=height,
195 restore_faces=restore_faces,
196 tiling=tiling,
197 init_images=[image],
198 mask=mask,
199 mask_blur=mask_blur,
200 inpainting_fill=inpainting_fill,
201 resize_mode=resize_mode,
202 denoising_strength=denoising_strength,
203 image_cfg_scale=image_cfg_scale,
204 inpaint_full_res=inpaint_full_res,
205 inpaint_full_res_padding=inpaint_full_res_padding,
206 inpainting_mask_invert=inpainting_mask_invert,
207 override_settings=override_settings,
208 )
209
210 p.scripts = modules.scripts.scripts_img2img
211 p.script_args = args
212
213 p.user = request.username
214
215 if shared.cmd_opts.enable_console_prompts:
216 print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
217
218 if mask:
219 p.extra_generation_params["Mask blur"] = mask_blur
220
221 with closing(p):
222 if is_batch:
223 assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
224
225 process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
226
227 processed = Processed(p, [], p.seed, "")
228 else:
229 processed = modules.scripts.scripts_img2img.run(p, *args)
230 if processed is None:
231 processed = process_images(p)
232
233 shared.total_tqdm.clear()
234
235 generation_info_js = processed.js()
236 if opts.samples_log_stdout:
237 print(generation_info_js)
238
239 if opts.do_not_show_images:
240 processed.images = []
241
242 return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modules/img2img.py b/modules/img2img.py
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -10,6 +10,7 @@
from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
+from modules.images import save_image
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
@@ -113,18 +114,18 @@
proc = process_images(p)
for n, processed_image in enumerate(proc.images):
- filename = image_path.name
+ filename = image_path.stem
+ infotext = proc.infotext(p, n)
relpath = os.path.dirname(os.path.relpath(image, input_dir))
if n > 0:
- left, right = os.path.splitext(filename)
- filename = f"{left}-{n}{right}"
+ filename += f"-{n}"
if not save_normally:
os.makedirs(os.path.join(output_dir, relpath), exist_ok=True)
if processed_image.mode == 'RGBA':
processed_image = processed_image.convert("RGB")
- processed_image.save(os.path.join(output_dir, relpath, filename))
+ save_image(processed_image, os.path.join(output_dir, relpath), None, extension=opts.samples_format, info=infotext, forced_filename=filename, save_to_dirs=False)
def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
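A note on the diff above: routing batch outputs through `save_image` means the file extension follows `opts.samples_format` and the configured save-quality settings apply, instead of the previous hard-coded `processed_image.save(...)` call with PIL defaults. A minimal Pillow-only sketch of that idea follows; the constants stand in for the webui settings and are assumptions, not the real `opts` values.

```python
from PIL import Image

# Assumed stand-ins for the webui settings; in the real code these come from
# modules.shared.opts (e.g. opts.samples_format and the JPEG quality option).
SAMPLES_FORMAT = "jpg"
JPEG_QUALITY = 100

def save_processed(image: Image.Image, path_no_ext: str) -> str:
    """Save with an explicit format/quality instead of relying on PIL defaults."""
    out_path = f"{path_no_ext}.{SAMPLES_FORMAT}"
    if SAMPLES_FORMAT.lower() in ("jpg", "jpeg"):
        image.convert("RGB").save(out_path, quality=JPEG_QUALITY)
    else:
        image.save(out_path)
    return out_path

if __name__ == "__main__":
    img = Image.new("RGB", (64, 64), "white")
    print(save_processed(img, "example_output"))
```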
| {"golden_diff": "diff --git a/modules/img2img.py b/modules/img2img.py\n--- a/modules/img2img.py\n+++ b/modules/img2img.py\n@@ -10,6 +10,7 @@\n from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters\r\n from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images\r\n from modules.shared import opts, state\r\n+from modules.images import save_image\r\n import modules.shared as shared\r\n import modules.processing as processing\r\n from modules.ui import plaintext_to_html\r\n@@ -113,18 +114,18 @@\n proc = process_images(p)\r\n \r\n for n, processed_image in enumerate(proc.images):\r\n- filename = image_path.name\r\n+ filename = image_path.stem\r\n+ infotext = proc.infotext(p, n)\r\n relpath = os.path.dirname(os.path.relpath(image, input_dir))\r\n \r\n if n > 0:\r\n- left, right = os.path.splitext(filename)\r\n- filename = f\"{left}-{n}{right}\"\r\n+ filename += f\"-{n}\"\r\n \r\n if not save_normally:\r\n os.makedirs(os.path.join(output_dir, relpath), exist_ok=True)\r\n if processed_image.mode == 'RGBA':\r\n processed_image = processed_image.convert(\"RGB\")\r\n- processed_image.save(os.path.join(output_dir, relpath, filename))\r\n+ save_image(processed_image, os.path.join(output_dir, relpath), None, extension=opts.samples_format, info=infotext, forced_filename=filename, save_to_dirs=False)\r\n \r\n \r\n def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):\n", "issue": "[Feature Request]: [batch processing] file compression ratio is not controlled,can you add control to it?\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What would your feature do ?\n\nwhen we are in the img2img batch processing,the output file format defaults to highly compressed jpg,\r\nunder the resolution of 896x1200,it's only 45k,this is great,\r\nbut a lot of times,we don't want to lose image quality to save storage space,\r\nalthough i have set [save jpeg picture quality] to 100% in [settings] [image save settings],\r\nbut it didn't work,i can't control it,\r\nis there any way i can improve the quality of the picture myself?\n\n### Proposed workflow\n\n1. [settings] --[image save settings],\r\n2. [save jpeg picture quality] to 100%\r\n3. 
[img2img] -- [batch processing] the quality of the output picture will be controlled,\r\n\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "import os\r\nfrom contextlib import closing\r\nfrom pathlib import Path\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError\r\nimport gradio as gr\r\n\r\nfrom modules import sd_samplers, images as imgutil\r\nfrom modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters\r\nfrom modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images\r\nfrom modules.shared import opts, state\r\nimport modules.shared as shared\r\nimport modules.processing as processing\r\nfrom modules.ui import plaintext_to_html\r\nimport modules.scripts\r\n\r\n\r\ndef process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):\r\n processing.fix_seed(p)\r\n\r\n images = list(shared.walk_files(input_dir, allowed_extensions=(\".png\", \".jpg\", \".jpeg\", \".webp\")))\r\n\r\n is_inpaint_batch = False\r\n if inpaint_mask_dir:\r\n inpaint_masks = shared.listfiles(inpaint_mask_dir)\r\n is_inpaint_batch = bool(inpaint_masks)\r\n\r\n if is_inpaint_batch:\r\n print(f\"\\nInpaint batch is enabled. {len(inpaint_masks)} masks found.\")\r\n\r\n print(f\"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.\")\r\n\r\n save_normally = output_dir == ''\r\n\r\n p.do_not_save_grid = True\r\n p.do_not_save_samples = not save_normally\r\n\r\n state.job_count = len(images) * p.n_iter\r\n\r\n # extract \"default\" params to use in case getting png info fails\r\n prompt = p.prompt\r\n negative_prompt = p.negative_prompt\r\n seed = p.seed\r\n cfg_scale = p.cfg_scale\r\n sampler_name = p.sampler_name\r\n steps = p.steps\r\n\r\n for i, image in enumerate(images):\r\n state.job = f\"{i+1} out of {len(images)}\"\r\n if state.skipped:\r\n state.skipped = False\r\n\r\n if state.interrupted:\r\n break\r\n\r\n try:\r\n img = Image.open(image)\r\n except UnidentifiedImageError as e:\r\n print(e)\r\n continue\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n img = ImageOps.exif_transpose(img)\r\n\r\n if to_scale:\r\n p.width = int(img.width * scale_by)\r\n p.height = int(img.height * scale_by)\r\n\r\n p.init_images = [img] * p.batch_size\r\n\r\n image_path = Path(image)\r\n if is_inpaint_batch:\r\n # try to find corresponding mask for an image using simple filename matching\r\n if len(inpaint_masks) == 1:\r\n mask_image_path = inpaint_masks[0]\r\n else:\r\n # try to find corresponding mask for an image using simple filename matching\r\n mask_image_dir = Path(inpaint_mask_dir)\r\n masks_found = list(mask_image_dir.glob(f\"{image_path.stem}.*\"))\r\n\r\n if len(masks_found) == 0:\r\n print(f\"Warning: mask is not found for {image_path} in {mask_image_dir}. 
Skipping it.\")\r\n continue\r\n\r\n # it should contain only 1 matching mask\r\n # otherwise user has many masks with the same name but different extensions\r\n mask_image_path = masks_found[0]\r\n\r\n mask_image = Image.open(mask_image_path)\r\n p.image_mask = mask_image\r\n\r\n if use_png_info:\r\n try:\r\n info_img = img\r\n if png_info_dir:\r\n info_img_path = os.path.join(png_info_dir, os.path.basename(image))\r\n info_img = Image.open(info_img_path)\r\n geninfo, _ = imgutil.read_info_from_image(info_img)\r\n parsed_parameters = parse_generation_parameters(geninfo)\r\n parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}\r\n except Exception:\r\n parsed_parameters = {}\r\n\r\n p.prompt = prompt + (\" \" + parsed_parameters[\"Prompt\"] if \"Prompt\" in parsed_parameters else \"\")\r\n p.negative_prompt = negative_prompt + (\" \" + parsed_parameters[\"Negative prompt\"] if \"Negative prompt\" in parsed_parameters else \"\")\r\n p.seed = int(parsed_parameters.get(\"Seed\", seed))\r\n p.cfg_scale = float(parsed_parameters.get(\"CFG scale\", cfg_scale))\r\n p.sampler_name = parsed_parameters.get(\"Sampler\", sampler_name)\r\n p.steps = int(parsed_parameters.get(\"Steps\", steps))\r\n\r\n proc = modules.scripts.scripts_img2img.run(p, *args)\r\n if proc is None:\r\n proc = process_images(p)\r\n\r\n for n, processed_image in enumerate(proc.images):\r\n filename = image_path.name\r\n relpath = os.path.dirname(os.path.relpath(image, input_dir))\r\n\r\n if n > 0:\r\n left, right = os.path.splitext(filename)\r\n filename = f\"{left}-{n}{right}\"\r\n\r\n if not save_normally:\r\n os.makedirs(os.path.join(output_dir, relpath), exist_ok=True)\r\n if processed_image.mode == 'RGBA':\r\n processed_image = processed_image.convert(\"RGB\")\r\n processed_image.save(os.path.join(output_dir, relpath, filename))\r\n\r\n\r\ndef img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):\r\n override_settings = create_override_settings_dict(override_settings_texts)\r\n\r\n is_batch = mode == 5\r\n\r\n if mode == 0: # img2img\r\n image = init_img.convert(\"RGB\")\r\n mask = None\r\n elif mode == 1: # img2img sketch\r\n image = sketch.convert(\"RGB\")\r\n mask = None\r\n elif mode == 2: # inpaint\r\n image, mask = init_img_with_mask[\"image\"], init_img_with_mask[\"mask\"]\r\n alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')\r\n mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')\r\n mask = ImageChops.lighter(alpha_mask, mask).convert('L')\r\n image = image.convert(\"RGB\")\r\n elif mode 
== 3: # inpaint sketch\r\n image = inpaint_color_sketch\r\n orig = inpaint_color_sketch_orig or inpaint_color_sketch\r\n pred = np.any(np.array(image) != np.array(orig), axis=-1)\r\n mask = Image.fromarray(pred.astype(np.uint8) * 255, \"L\")\r\n mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)\r\n blur = ImageFilter.GaussianBlur(mask_blur)\r\n image = Image.composite(image.filter(blur), orig, mask.filter(blur))\r\n image = image.convert(\"RGB\")\r\n elif mode == 4: # inpaint upload mask\r\n image = init_img_inpaint\r\n mask = init_mask_inpaint\r\n else:\r\n image = None\r\n mask = None\r\n\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n if image is not None:\r\n image = ImageOps.exif_transpose(image)\r\n\r\n if selected_scale_tab == 1 and not is_batch:\r\n assert image, \"Can't scale by because no image is selected\"\r\n\r\n width = int(image.width * scale_by)\r\n height = int(image.height * scale_by)\r\n\r\n assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'\r\n\r\n p = StableDiffusionProcessingImg2Img(\r\n sd_model=shared.sd_model,\r\n outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,\r\n outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,\r\n prompt=prompt,\r\n negative_prompt=negative_prompt,\r\n styles=prompt_styles,\r\n seed=seed,\r\n subseed=subseed,\r\n subseed_strength=subseed_strength,\r\n seed_resize_from_h=seed_resize_from_h,\r\n seed_resize_from_w=seed_resize_from_w,\r\n seed_enable_extras=seed_enable_extras,\r\n sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,\r\n batch_size=batch_size,\r\n n_iter=n_iter,\r\n steps=steps,\r\n cfg_scale=cfg_scale,\r\n width=width,\r\n height=height,\r\n restore_faces=restore_faces,\r\n tiling=tiling,\r\n init_images=[image],\r\n mask=mask,\r\n mask_blur=mask_blur,\r\n inpainting_fill=inpainting_fill,\r\n resize_mode=resize_mode,\r\n denoising_strength=denoising_strength,\r\n image_cfg_scale=image_cfg_scale,\r\n inpaint_full_res=inpaint_full_res,\r\n inpaint_full_res_padding=inpaint_full_res_padding,\r\n inpainting_mask_invert=inpainting_mask_invert,\r\n override_settings=override_settings,\r\n )\r\n\r\n p.scripts = modules.scripts.scripts_img2img\r\n p.script_args = args\r\n\r\n p.user = request.username\r\n\r\n if shared.cmd_opts.enable_console_prompts:\r\n print(f\"\\nimg2img: {prompt}\", file=shared.progress_print_out)\r\n\r\n if mask:\r\n p.extra_generation_params[\"Mask blur\"] = mask_blur\r\n\r\n with closing(p):\r\n if is_batch:\r\n assert not shared.cmd_opts.hide_ui_dir_config, \"Launched with --hide-ui-dir-config, batch img2img disabled\"\r\n\r\n process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)\r\n\r\n processed = Processed(p, [], p.seed, \"\")\r\n else:\r\n processed = modules.scripts.scripts_img2img.run(p, *args)\r\n if processed is None:\r\n processed = process_images(p)\r\n\r\n shared.total_tqdm.clear()\r\n\r\n generation_info_js = processed.js()\r\n if opts.samples_log_stdout:\r\n print(generation_info_js)\r\n\r\n if opts.do_not_show_images:\r\n processed.images = []\r\n\r\n return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)\r\n", "path": "modules/img2img.py"}], "after_files": [{"content": "import os\r\nfrom contextlib 
import closing\r\nfrom pathlib import Path\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError\r\nimport gradio as gr\r\n\r\nfrom modules import sd_samplers, images as imgutil\r\nfrom modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters\r\nfrom modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images\r\nfrom modules.shared import opts, state\r\nfrom modules.images import save_image\r\nimport modules.shared as shared\r\nimport modules.processing as processing\r\nfrom modules.ui import plaintext_to_html\r\nimport modules.scripts\r\n\r\n\r\ndef process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):\r\n processing.fix_seed(p)\r\n\r\n images = list(shared.walk_files(input_dir, allowed_extensions=(\".png\", \".jpg\", \".jpeg\", \".webp\")))\r\n\r\n is_inpaint_batch = False\r\n if inpaint_mask_dir:\r\n inpaint_masks = shared.listfiles(inpaint_mask_dir)\r\n is_inpaint_batch = bool(inpaint_masks)\r\n\r\n if is_inpaint_batch:\r\n print(f\"\\nInpaint batch is enabled. {len(inpaint_masks)} masks found.\")\r\n\r\n print(f\"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.\")\r\n\r\n save_normally = output_dir == ''\r\n\r\n p.do_not_save_grid = True\r\n p.do_not_save_samples = not save_normally\r\n\r\n state.job_count = len(images) * p.n_iter\r\n\r\n # extract \"default\" params to use in case getting png info fails\r\n prompt = p.prompt\r\n negative_prompt = p.negative_prompt\r\n seed = p.seed\r\n cfg_scale = p.cfg_scale\r\n sampler_name = p.sampler_name\r\n steps = p.steps\r\n\r\n for i, image in enumerate(images):\r\n state.job = f\"{i+1} out of {len(images)}\"\r\n if state.skipped:\r\n state.skipped = False\r\n\r\n if state.interrupted:\r\n break\r\n\r\n try:\r\n img = Image.open(image)\r\n except UnidentifiedImageError as e:\r\n print(e)\r\n continue\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n img = ImageOps.exif_transpose(img)\r\n\r\n if to_scale:\r\n p.width = int(img.width * scale_by)\r\n p.height = int(img.height * scale_by)\r\n\r\n p.init_images = [img] * p.batch_size\r\n\r\n image_path = Path(image)\r\n if is_inpaint_batch:\r\n # try to find corresponding mask for an image using simple filename matching\r\n if len(inpaint_masks) == 1:\r\n mask_image_path = inpaint_masks[0]\r\n else:\r\n # try to find corresponding mask for an image using simple filename matching\r\n mask_image_dir = Path(inpaint_mask_dir)\r\n masks_found = list(mask_image_dir.glob(f\"{image_path.stem}.*\"))\r\n\r\n if len(masks_found) == 0:\r\n print(f\"Warning: mask is not found for {image_path} in {mask_image_dir}. 
Skipping it.\")\r\n continue\r\n\r\n # it should contain only 1 matching mask\r\n # otherwise user has many masks with the same name but different extensions\r\n mask_image_path = masks_found[0]\r\n\r\n mask_image = Image.open(mask_image_path)\r\n p.image_mask = mask_image\r\n\r\n if use_png_info:\r\n try:\r\n info_img = img\r\n if png_info_dir:\r\n info_img_path = os.path.join(png_info_dir, os.path.basename(image))\r\n info_img = Image.open(info_img_path)\r\n geninfo, _ = imgutil.read_info_from_image(info_img)\r\n parsed_parameters = parse_generation_parameters(geninfo)\r\n parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}\r\n except Exception:\r\n parsed_parameters = {}\r\n\r\n p.prompt = prompt + (\" \" + parsed_parameters[\"Prompt\"] if \"Prompt\" in parsed_parameters else \"\")\r\n p.negative_prompt = negative_prompt + (\" \" + parsed_parameters[\"Negative prompt\"] if \"Negative prompt\" in parsed_parameters else \"\")\r\n p.seed = int(parsed_parameters.get(\"Seed\", seed))\r\n p.cfg_scale = float(parsed_parameters.get(\"CFG scale\", cfg_scale))\r\n p.sampler_name = parsed_parameters.get(\"Sampler\", sampler_name)\r\n p.steps = int(parsed_parameters.get(\"Steps\", steps))\r\n\r\n proc = modules.scripts.scripts_img2img.run(p, *args)\r\n if proc is None:\r\n proc = process_images(p)\r\n\r\n for n, processed_image in enumerate(proc.images):\r\n filename = image_path.stem\r\n infotext = proc.infotext(p, n)\r\n relpath = os.path.dirname(os.path.relpath(image, input_dir))\r\n\r\n if n > 0:\r\n filename += f\"-{n}\"\r\n\r\n if not save_normally:\r\n os.makedirs(os.path.join(output_dir, relpath), exist_ok=True)\r\n if processed_image.mode == 'RGBA':\r\n processed_image = processed_image.convert(\"RGB\")\r\n save_image(processed_image, os.path.join(output_dir, relpath), None, extension=opts.samples_format, info=infotext, forced_filename=filename, save_to_dirs=False)\r\n\r\n\r\ndef img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):\r\n override_settings = create_override_settings_dict(override_settings_texts)\r\n\r\n is_batch = mode == 5\r\n\r\n if mode == 0: # img2img\r\n image = init_img.convert(\"RGB\")\r\n mask = None\r\n elif mode == 1: # img2img sketch\r\n image = sketch.convert(\"RGB\")\r\n mask = None\r\n elif mode == 2: # inpaint\r\n image, mask = init_img_with_mask[\"image\"], init_img_with_mask[\"mask\"]\r\n alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')\r\n mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')\r\n mask = 
ImageChops.lighter(alpha_mask, mask).convert('L')\r\n image = image.convert(\"RGB\")\r\n elif mode == 3: # inpaint sketch\r\n image = inpaint_color_sketch\r\n orig = inpaint_color_sketch_orig or inpaint_color_sketch\r\n pred = np.any(np.array(image) != np.array(orig), axis=-1)\r\n mask = Image.fromarray(pred.astype(np.uint8) * 255, \"L\")\r\n mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)\r\n blur = ImageFilter.GaussianBlur(mask_blur)\r\n image = Image.composite(image.filter(blur), orig, mask.filter(blur))\r\n image = image.convert(\"RGB\")\r\n elif mode == 4: # inpaint upload mask\r\n image = init_img_inpaint\r\n mask = init_mask_inpaint\r\n else:\r\n image = None\r\n mask = None\r\n\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n if image is not None:\r\n image = ImageOps.exif_transpose(image)\r\n\r\n if selected_scale_tab == 1 and not is_batch:\r\n assert image, \"Can't scale by because no image is selected\"\r\n\r\n width = int(image.width * scale_by)\r\n height = int(image.height * scale_by)\r\n\r\n assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'\r\n\r\n p = StableDiffusionProcessingImg2Img(\r\n sd_model=shared.sd_model,\r\n outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,\r\n outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,\r\n prompt=prompt,\r\n negative_prompt=negative_prompt,\r\n styles=prompt_styles,\r\n seed=seed,\r\n subseed=subseed,\r\n subseed_strength=subseed_strength,\r\n seed_resize_from_h=seed_resize_from_h,\r\n seed_resize_from_w=seed_resize_from_w,\r\n seed_enable_extras=seed_enable_extras,\r\n sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,\r\n batch_size=batch_size,\r\n n_iter=n_iter,\r\n steps=steps,\r\n cfg_scale=cfg_scale,\r\n width=width,\r\n height=height,\r\n restore_faces=restore_faces,\r\n tiling=tiling,\r\n init_images=[image],\r\n mask=mask,\r\n mask_blur=mask_blur,\r\n inpainting_fill=inpainting_fill,\r\n resize_mode=resize_mode,\r\n denoising_strength=denoising_strength,\r\n image_cfg_scale=image_cfg_scale,\r\n inpaint_full_res=inpaint_full_res,\r\n inpaint_full_res_padding=inpaint_full_res_padding,\r\n inpainting_mask_invert=inpainting_mask_invert,\r\n override_settings=override_settings,\r\n )\r\n\r\n p.scripts = modules.scripts.scripts_img2img\r\n p.script_args = args\r\n\r\n p.user = request.username\r\n\r\n if shared.cmd_opts.enable_console_prompts:\r\n print(f\"\\nimg2img: {prompt}\", file=shared.progress_print_out)\r\n\r\n if mask:\r\n p.extra_generation_params[\"Mask blur\"] = mask_blur\r\n\r\n with closing(p):\r\n if is_batch:\r\n assert not shared.cmd_opts.hide_ui_dir_config, \"Launched with --hide-ui-dir-config, batch img2img disabled\"\r\n\r\n process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)\r\n\r\n processed = Processed(p, [], p.seed, \"\")\r\n else:\r\n processed = modules.scripts.scripts_img2img.run(p, *args)\r\n if processed is None:\r\n processed = process_images(p)\r\n\r\n shared.total_tqdm.clear()\r\n\r\n generation_info_js = processed.js()\r\n if opts.samples_log_stdout:\r\n print(generation_info_js)\r\n\r\n if opts.do_not_show_images:\r\n processed.images = []\r\n\r\n return processed.images, generation_info_js, plaintext_to_html(processed.info), 
plaintext_to_html(processed.comments)\r\n", "path": "modules/img2img.py"}]} | 3,627 | 619 |
gh_patches_debug_22777 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1370 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NC - Committee assignments missing for a house member
State: NC
There may be more; I just happened to find this one today. Jay Adams is missing committee assignments, and the API does not show any in his roles.
https://openstates.org/api/v1/legislators/NCL000338/
The NCGA site shows committee assignments here: http://www.ncleg.net/gascripts/members/reports/committeeAssignments.pl?nUserid=697&Chamber=H
There may be something broken and it may affect more members in NC.
I do have assignments for other members, but I have not yet determined what is different between Adams and the others.
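If the committee pages render members with an office prefix (for example "Rep. Adams" rather than "Adams"), billy's name matcher may fail to link the role to the legislator. A minimal sketch of stripping such a prefix before calling `add_member` (the helper name and the exact prefix list here are assumptions for illustration):
```python
def _clean_member_name(name):
    """Strip a leading office prefix such as "Rep. " or "Sen. " from a name.

    Committee pages can show members as "Rep. Adams"; billy's NameMatcher
    needs plain "Adams" to link the role to the right legislator.
    """
    for prefix in ('Rep. ', 'Sen. '):
        if name.startswith(prefix):
            return name[len(prefix):]
    return name
```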
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/nc/committees.py`
Content:
```
1 from billy.scrape.committees import CommitteeScraper, Committee
2
3 import lxml.html
4
5 class NCCommitteeScraper(CommitteeScraper):
6 jurisdiction = 'nc'
7
8 def scrape_committee(self, committee, url):
9 url = url.replace(' ', '%20') + '&bPrintable=true'
10 data = self.get(url).text
11 doc = lxml.html.fromstring(data)
12 for row in doc.xpath('//table/tr'):
13 children = row.getchildren()
14 if len(children) != 2:
15 self.log('skipping members for ' + committee['committee'])
16 continue
17 mtype, members = row.getchildren()
18 if mtype.text == 'Members':
19 for m in members.getchildren():
20 committee.add_member(m.text)
21 else:
22 committee.add_member(members.text_content(), mtype.text)
23
24
25 def scrape(self, term, chambers):
26 base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='
27
28 chamber_slugs = {'upper': ['Senate%20Standing', 'Senate%20Select'],
29 'lower': ['House%20Standing', 'House%20Select']}
30
31 for chamber in chambers:
32 for ctype in chamber_slugs[chamber]:
33 data = self.get(base_url + ctype).text
34 doc = lxml.html.fromstring(data)
35 doc.make_links_absolute(base_url+ctype)
36 for comm in doc.xpath('//ul/li/a'):
37 name = comm.text
38 # skip committee of whole Senate
39 if 'Whole Senate' in name:
40 continue
41 url = comm.get('href')
42 committee = Committee(chamber, name)
43 self.scrape_committee(committee, url)
44 committee.add_source(url)
45 if not committee['members']:
46 self.warning('empty committee: %s', name)
47 else:
48 self.save_committee(committee)
49
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/nc/committees.py b/openstates/nc/committees.py
--- a/openstates/nc/committees.py
+++ b/openstates/nc/committees.py
@@ -17,10 +17,23 @@
mtype, members = row.getchildren()
if mtype.text == 'Members':
for m in members.getchildren():
- committee.add_member(m.text)
+ member_name = self._clean_member_name(m.text)
+ committee.add_member(member_name)
else:
- committee.add_member(members.text_content(), mtype.text)
+ member_name = self._clean_member_name(members.text_content())
+ committee.add_member(member_name, mtype.text)
+ def _clean_member_name(self, name):
+ """Names are displayed as "Office. LastName", e.g. "Rep. Adamsa". This strips the "Office. "
+
+ This helps the NameMatcher in billy link this to the correct legislator.
+ """
+ for prefix in ['Rep. ', 'Sen. ']:
+ if name.startswith(prefix):
+ return name.replace(prefix, '')
+
+ # If none hit, return the name as is
+ return name
def scrape(self, term, chambers):
base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='
| {"golden_diff": "diff --git a/openstates/nc/committees.py b/openstates/nc/committees.py\n--- a/openstates/nc/committees.py\n+++ b/openstates/nc/committees.py\n@@ -17,10 +17,23 @@\n mtype, members = row.getchildren()\n if mtype.text == 'Members':\n for m in members.getchildren():\n- committee.add_member(m.text)\n+ member_name = self._clean_member_name(m.text)\n+ committee.add_member(member_name)\n else:\n- committee.add_member(members.text_content(), mtype.text)\n+ member_name = self._clean_member_name(members.text_content())\n+ committee.add_member(member_name, mtype.text)\n \n+ def _clean_member_name(self, name):\n+ \"\"\"Names are displayed as \"Office. LastName\", e.g. \"Rep. Adamsa\". This strips the \"Office. \"\n+\n+ This helps the NameMatcher in billy link this to the correct legislator.\n+ \"\"\"\n+ for prefix in ['Rep. ', 'Sen. ']:\n+ if name.startswith(prefix):\n+ return name.replace(prefix, '')\n+\n+ # If none hit, return the name as is\n+ return name\n \n def scrape(self, term, chambers):\n base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='\n", "issue": "NC - Committee assignments missing for a house member\nState: NC\r\n\r\nThere may be more, I just happen to find this today. Jay Adams is missing committee assignments. The API does not show any in roles. \r\n\r\nhttps://openstates.org/api/v1/legislators/NCL000338/\r\n\r\nThe NCGA site shows committee assignments here: http://www.ncleg.net/gascripts/members/reports/committeeAssignments.pl?nUserid=697&Chamber=H\r\n\r\nThere may be something broken and it may affect more members in NC.\r\n\r\nI have assignments for others. Have not determined what is different between Adams and the others\n", "before_files": [{"content": "from billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\nclass NCCommitteeScraper(CommitteeScraper):\n jurisdiction = 'nc'\n\n def scrape_committee(self, committee, url):\n url = url.replace(' ', '%20') + '&bPrintable=true'\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n for row in doc.xpath('//table/tr'):\n children = row.getchildren()\n if len(children) != 2:\n self.log('skipping members for ' + committee['committee'])\n continue\n mtype, members = row.getchildren()\n if mtype.text == 'Members':\n for m in members.getchildren():\n committee.add_member(m.text)\n else:\n committee.add_member(members.text_content(), mtype.text)\n\n\n def scrape(self, term, chambers):\n base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='\n\n chamber_slugs = {'upper': ['Senate%20Standing', 'Senate%20Select'],\n 'lower': ['House%20Standing', 'House%20Select']}\n\n for chamber in chambers:\n for ctype in chamber_slugs[chamber]:\n data = self.get(base_url + ctype).text\n doc = lxml.html.fromstring(data)\n doc.make_links_absolute(base_url+ctype)\n for comm in doc.xpath('//ul/li/a'):\n name = comm.text\n # skip committee of whole Senate\n if 'Whole Senate' in name:\n continue\n url = comm.get('href')\n committee = Committee(chamber, name)\n self.scrape_committee(committee, url)\n committee.add_source(url)\n if not committee['members']:\n self.warning('empty committee: %s', name)\n else:\n self.save_committee(committee)\n\n", "path": "openstates/nc/committees.py"}], "after_files": [{"content": "from billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\nclass NCCommitteeScraper(CommitteeScraper):\n 
jurisdiction = 'nc'\n\n def scrape_committee(self, committee, url):\n url = url.replace(' ', '%20') + '&bPrintable=true'\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n for row in doc.xpath('//table/tr'):\n children = row.getchildren()\n if len(children) != 2:\n self.log('skipping members for ' + committee['committee'])\n continue\n mtype, members = row.getchildren()\n if mtype.text == 'Members':\n for m in members.getchildren():\n member_name = self._clean_member_name(m.text)\n committee.add_member(member_name)\n else:\n member_name = self._clean_member_name(members.text_content())\n committee.add_member(member_name, mtype.text)\n\n def _clean_member_name(self, name):\n \"\"\"Names are displayed as \"Office. LastName\", e.g. \"Rep. Adamsa\". This strips the \"Office. \"\n\n This helps the NameMatcher in billy link this to the correct legislator.\n \"\"\"\n for prefix in ['Rep. ', 'Sen. ']:\n if name.startswith(prefix):\n return name.replace(prefix, '')\n\n # If none hit, return the name as is\n return name\n\n def scrape(self, term, chambers):\n base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='\n\n chamber_slugs = {'upper': ['Senate%20Standing', 'Senate%20Select'],\n 'lower': ['House%20Standing', 'House%20Select']}\n\n for chamber in chambers:\n for ctype in chamber_slugs[chamber]:\n data = self.get(base_url + ctype).text\n doc = lxml.html.fromstring(data)\n doc.make_links_absolute(base_url+ctype)\n for comm in doc.xpath('//ul/li/a'):\n name = comm.text\n # skip committee of whole Senate\n if 'Whole Senate' in name:\n continue\n url = comm.get('href')\n committee = Committee(chamber, name)\n self.scrape_committee(committee, url)\n committee.add_source(url)\n if not committee['members']:\n self.warning('empty committee: %s', name)\n else:\n self.save_committee(committee)\n\n", "path": "openstates/nc/committees.py"}]} | 913 | 317 |
gh_patches_debug_858 | rasdani/github-patches | git_diff | pypi__warehouse-3292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warehouse file order differs from legacy PyPI file list
Tonight, while load testing of pypi.org was ongoing, we saw some failures in automated systems that use `--require-hashes` with `pip install`, as ordering on the package file list page changed.
The specific package we saw break was `pandas` at version `0.12.0`. We had a single hash for `pandas-0.12.0.tar.gz`. A few of our hosts were served from the legacy PyPI service, which succeeded as normal. The Warehouse endpoint, however, failed, since `pandas-0.12.0.zip` now preceded `pandas-0.12.0.tar.gz` in the file list.
At the moment, you can see that https://pypi.org/simple/pandas/ and https://pypi.python.org/simple/pandas/ differ by searching for `pandas-0.12.0.tar.gz` and `pandas-0.12.0.zip` and comparing the position.
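For illustration, this is the behaviour you would expect if the detail page sorted files by `(version, packagetype)` instead of `(version, filename)`: both pandas 0.12.0 artifacts are source distributions (assuming both carry the `sdist` packagetype), so a packagetype key ties and the relative order falls back to whatever order the rows came back in, while a filename key is deterministic. A rough sketch of the two orderings (the tuples below are a stand-in for the real file records):
```python
from packaging.version import parse

files = [
    ("pandas-0.12.0.zip", "0.12.0", "sdist"),
    ("pandas-0.12.0.tar.gz", "0.12.0", "sdist"),
]

# Packagetype key: (version, packagetype) is identical for both files, so the
# sort ties and the incoming row order decides which file is listed first.
by_packagetype = sorted(files, key=lambda f: (parse(f[1]), f[2]))

# Filename key: deterministic, and places pandas-0.12.0.tar.gz before
# pandas-0.12.0.zip regardless of row order.
by_filename = sorted(files, key=lambda f: (parse(f[1]), f[0]))

print([f[0] for f in by_packagetype])
print([f[0] for f in by_filename])
```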
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/legacy/api/simple.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13
14 from packaging.version import parse
15 from pyramid.httpexceptions import HTTPMovedPermanently
16 from pyramid.view import view_config
17 from sqlalchemy import func
18 from sqlalchemy.orm import joinedload
19
20 from warehouse.cache.http import cache_control
21 from warehouse.cache.origin import origin_cache
22 from warehouse.packaging.models import JournalEntry, File, Project, Release
23
24
25 @view_config(
26 route_name="legacy.api.simple.index",
27 renderer="legacy/api/simple/index.html",
28 decorator=[
29 cache_control(10 * 60), # 10 minutes
30 origin_cache(
31 1 * 24 * 60 * 60, # 1 day
32 stale_while_revalidate=5 * 60, # 5 minutes
33 stale_if_error=1 * 24 * 60 * 60, # 1 day
34 ),
35 ],
36 )
37 def simple_index(request):
38 # Get the latest serial number
39 serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0
40 request.response.headers["X-PyPI-Last-Serial"] = str(serial)
41
42 # Fetch the name and normalized name for all of our projects
43 projects = (
44 request.db.query(Project.name, Project.normalized_name)
45 .order_by(Project.normalized_name)
46 .all()
47 )
48
49 return {"projects": projects}
50
51
52 @view_config(
53 route_name="legacy.api.simple.detail",
54 renderer="legacy/api/simple/detail.html",
55 decorator=[
56 cache_control(10 * 60), # 10 minutes
57 origin_cache(
58 1 * 24 * 60 * 60, # 1 day
59 stale_while_revalidate=5 * 60, # 5 minutes
60 stale_if_error=1 * 24 * 60 * 60, # 1 day
61 ),
62 ],
63 )
64 def simple_detail(project, request):
65 # TODO: Handle files which are not hosted on PyPI
66
67 # Make sure that we're using the normalized version of the URL.
68 if (project.normalized_name !=
69 request.matchdict.get("name", project.normalized_name)):
70 return HTTPMovedPermanently(
71 request.current_route_path(name=project.normalized_name),
72 )
73
74 # Get the latest serial number for this project.
75 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
76
77 # Get all of the files for this project.
78 files = sorted(
79 request.db.query(File)
80 .options(joinedload(File.release))
81 .filter(
82 File.name == project.name,
83 File.version.in_(
84 request.db.query(Release)
85 .filter(Release.project == project)
86 .with_entities(Release.version)
87 )
88 )
89 .all(),
90 key=lambda f: (parse(f.version), f.packagetype)
91 )
92
93 return {"project": project, "files": files}
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py
--- a/warehouse/legacy/api/simple.py
+++ b/warehouse/legacy/api/simple.py
@@ -87,7 +87,7 @@
)
)
.all(),
- key=lambda f: (parse(f.version), f.packagetype)
+ key=lambda f: (parse(f.version), f.filename)
)
return {"project": project, "files": files}
| {"golden_diff": "diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py\n--- a/warehouse/legacy/api/simple.py\n+++ b/warehouse/legacy/api/simple.py\n@@ -87,7 +87,7 @@\n )\n )\n .all(),\n- key=lambda f: (parse(f.version), f.packagetype)\n+ key=lambda f: (parse(f.version), f.filename)\n )\n \n return {\"project\": project, \"files\": files}\n", "issue": "Warehouse file order differs from legacy PyPI file list\nTonight, while load testing of pypi.org was ongoing, we saw some failures in automated systems that use `--require-hashes` with `pip install`, as ordering on the package file list page changed.\r\n\r\nThe specific package we saw break was `pandas` at version `0.12.0`. We had a single hash for `pandas-0.12.0.tar.gz`. A few of our hosts were served from the legacy PyPI service, which succeeded as normal. The Warehouse endpoint, however, failed, since `pandas-0.12.0.zip` now preceded `pandas-0.12.0.tar.gz` in the file list.\r\n\r\nAt the moment, you can see that https://pypi.org/simple/pandas/ and https://pypi.python.org/simple/pandas/ differ by searching for `pandas-0.12.0.tar.gz` and `pandas-0.12.0.zip` and comparing the position.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom packaging.version import parse\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number 
for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .all(),\n key=lambda f: (parse(f.version), f.packagetype)\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom packaging.version import parse\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .all(),\n key=lambda f: (parse(f.version), f.filename)\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}]} | 1,403 | 105 |
gh_patches_debug_64588 | rasdani/github-patches | git_diff | kubeflow__pipelines-7172 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[sdk] Update absl-py requirement to accommodate future TF versions
### Environment
* KFP version:
N/A
* KFP SDK version:
1.8.10 (latest)
* All dependencies version:
N/A
### Steps to reproduce
I believe TensorFlow is frequently used with KFP, so TF and KFP should be installable together. TF recently updated its absl-py requirement to `>=1.0.0` ([setup.py in TF](https://github.com/tensorflow/tensorflow/blob/c93e4757491a4abb7aa990e18f2d321e4a935a03/tensorflow/tools/pip_package/setup.py#L77)). The next TF version (probably 2.8.0) cannot be installed together with the latest kfp, because kfp pins absl-py to `<=0.11`. TF-nightly is already hit by this issue.
```
$ pip install tf-nightly==2.9.0.dev20220116 kfp==1.8.10
...
ERROR: Cannot install kfp==1.8.10 and tf-nightly==2.9.0.dev20220116 because these package versions have conflicting dependencies.
The conflict is caused by:
tf-nightly 2.9.0.dev20220116 depends on absl-py>=1.0.0
kfp 1.8.10 depends on absl-py<=0.11 and >=0.9
To fix this you could try to:
1. loosen the range of package versions you've specified
2. remove package versions to allow pip attempt to solve the dependency conflict
ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies
```
### Expected result
I think the version range for absl-py should become `absl-py>=0.9,<2.0.0` if possible. absl-py has not had any breaking changes recently ([Change log](https://github.com/abseil/abseil-py/blob/main/CHANGELOG.md)), so it should be safe to upgrade. We are doing [a similar upgrade in TFX](https://github.com/tensorflow/tfx/pull/4573).
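Concretely, that means relaxing the pin in the `REQUIRES` list of `sdk/python/setup.py`; a sketch of how the entry could look (whether the upper bound is spelled `<2` or `<2.0.0` is a detail):
```python
REQUIRES = [
    # Relaxed so kfp can coexist with TF releases that require absl-py>=1.0.0.
    'absl-py>=0.9,<2',
    'PyYAML>=5.3,<6',
    # ... remaining pins unchanged ...
]
```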
It would be great if you could do a patch release with the fix if possible, because we are already seeing failures in integration tests that use TF-nightly and kfp.
### Materials and Reference
N/A
---
<!-- Don't delete message below to encourage users to support your issue! -->
Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/setup.py`
Content:
```
1 # Copyright 2018 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import re
17
18 from setuptools import setup
19
20 NAME = 'kfp'
21 #VERSION = .... Change the version in kfp/__init__.py
22
23 # NOTICE, after any updates to the following, ./requirements.in should be updated
24 # accordingly.
25 REQUIRES = [
26 'absl-py>=0.9,<=0.11',
27 'PyYAML>=5.3,<6',
28 # `Blob.from_string` was introduced in google-cloud-storage 1.20.0
29 # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200
30 'google-cloud-storage>=1.20.0,<2',
31 'kubernetes>=8.0.0,<19',
32 # NOTE: Maintainers, please do not require google-auth>=2.x.x
33 # Until this issue is closed
34 # https://github.com/googleapis/google-cloud-python/issues/10566
35 'google-auth>=1.6.1,<3',
36 'requests-toolbelt>=0.8.0,<1',
37 'cloudpickle>=2.0.0,<3',
38 # Update the upper version whenever a new major version of the
39 # kfp-server-api package is released.
40 # Update the lower version when kfp sdk depends on new apis/fields in
41 # kfp-server-api.
42 # Note, please also update ./requirements.in
43 'kfp-server-api>=1.1.2,<2.0.0',
44 'jsonschema>=3.0.1,<4',
45 'tabulate>=0.8.6,<1',
46 'click>=7.1.2,<9',
47 'Deprecated>=1.2.7,<2',
48 'strip-hints>=0.1.8,<1',
49 'docstring-parser>=0.7.3,<1',
50 'kfp-pipeline-spec>=0.1.13,<0.2.0',
51 'fire>=0.3.1,<1',
52 'protobuf>=3.13.0,<4',
53 'uritemplate>=3.0.1,<4',
54 'pydantic>=1.8.2,<2',
55 'typer>=0.3.2,<1.0',
56 # Standard library backports
57 'dataclasses;python_version<"3.7"',
58 'typing-extensions>=3.7.4,<4;python_version<"3.9"',
59 ]
60
61 EXTRAS_REQUIRE = {
62 'all': ['docker'],
63 }
64
65
66 def find_version(*file_path_parts):
67 here = os.path.abspath(os.path.dirname(__file__))
68 with open(os.path.join(here, *file_path_parts), 'r') as fp:
69 version_file_text = fp.read()
70
71 version_match = re.search(
72 r"^__version__ = ['\"]([^'\"]*)['\"]",
73 version_file_text,
74 re.M,
75 )
76 if version_match:
77 return version_match.group(1)
78
79 raise RuntimeError('Unable to find version string.')
80
81
82 setup(
83 name=NAME,
84 version=find_version('kfp', '__init__.py'),
85 description='KubeFlow Pipelines SDK',
86 author='The Kubeflow Authors',
87 url="https://github.com/kubeflow/pipelines",
88 project_urls={
89 "Documentation":
90 "https://kubeflow-pipelines.readthedocs.io/en/stable/",
91 "Bug Tracker":
92 "https://github.com/kubeflow/pipelines/issues",
93 "Source":
94 "https://github.com/kubeflow/pipelines/tree/master/sdk",
95 "Changelog":
96 "https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md",
97 },
98 install_requires=REQUIRES,
99 extras_require=EXTRAS_REQUIRE,
100 packages=[
101 'kfp',
102 'kfp.auth',
103 'kfp.cli',
104 'kfp.cli.diagnose_me',
105 'kfp.compiler',
106 'kfp.components',
107 'kfp.components.structures',
108 'kfp.containers',
109 'kfp.dsl',
110 'kfp.dsl.extensions',
111 'kfp.notebook',
112 'kfp.v2',
113 'kfp.v2.compiler',
114 'kfp.v2.components',
115 'kfp.v2.components.types',
116 'kfp.v2.dsl',
117 ],
118 classifiers=[
119 'Intended Audience :: Developers',
120 'Intended Audience :: Education',
121 'Intended Audience :: Science/Research',
122 'License :: OSI Approved :: Apache Software License',
123 'Programming Language :: Python :: 3',
124 'Programming Language :: Python :: 3.6',
125 'Programming Language :: Python :: 3.7',
126 'Programming Language :: Python :: 3.8',
127 'Programming Language :: Python :: 3.9',
128 'Topic :: Scientific/Engineering',
129 'Topic :: Scientific/Engineering :: Artificial Intelligence',
130 'Topic :: Software Development',
131 'Topic :: Software Development :: Libraries',
132 'Topic :: Software Development :: Libraries :: Python Modules',
133 ],
134 python_requires='>=3.6.1',
135 include_package_data=True,
136 entry_points={
137 'console_scripts': [
138 'dsl-compile = kfp.compiler.main:main',
139 'dsl-compile-v2 = kfp.v2.compiler.main:main',
140 'kfp=kfp.__main__:main'
141 ]
142 })
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -23,7 +23,7 @@
# NOTICE, after any updates to the following, ./requirements.in should be updated
# accordingly.
REQUIRES = [
- 'absl-py>=0.9,<=0.11',
+ 'absl-py>=0.9,<2',
'PyYAML>=5.3,<6',
# `Blob.from_string` was introduced in google-cloud-storage 1.20.0
# https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -23,7 +23,7 @@\n # NOTICE, after any updates to the following, ./requirements.in should be updated\n # accordingly.\n REQUIRES = [\n- 'absl-py>=0.9,<=0.11',\n+ 'absl-py>=0.9,<2',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n", "issue": "[sdk] Update absl-py requirement to accommodate future TF versions\n### Environment\r\n\r\n* KFP version:\r\nN/A\r\n\r\n* KFP SDK version: \r\n1.8.10 (latest)\r\n\r\n* All dependencies version:\r\nN/A\r\n\r\n\r\n### Steps to reproduce\r\n\r\nI believe that Tensorflow is frequently used with KFP and TF and should be able to be installed together. TF updated their absl-py requirement to `>=1.0.0` recently. [setup.py in TF](https://github.com/tensorflow/tensorflow/blob/c93e4757491a4abb7aa990e18f2d321e4a935a03/tensorflow/tools/pip_package/setup.py#L77). The next TF version(probably, 2.8.0) cannot be installed with the recent version kfp, because kfp is pinned to `<=0.11`. TF-nightly was already hit by this issue.\r\n\r\n```\r\n$ pip install tf-nightly==2.9.0.dev20220116 kfp==1.8.10\r\n...\r\nERROR: Cannot install kfp==1.8.10 and tf-nightly==2.9.0.dev20220116 because these package versions have conflicting dependencies.\r\n\r\nThe conflict is caused by:\r\n tf-nightly 2.9.0.dev20220116 depends on absl-py>=1.0.0\r\n kfp 1.8.10 depends on absl-py<=0.11 and >=0.9\r\n\r\nTo fix this you could try to:\r\n1. loosen the range of package versions you've specified\r\n2. remove package versions to allow pip attempt to solve the dependency conflict\r\n\r\nERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies\r\n```\r\n\r\n### Expected result\r\n\r\nI think that the version range of absl should become `absl-py>=0.9,<2.0.0` if possible. Absl-py doesn't have [any breaking change recently ([Change log](https://github.com/abseil/abseil-py/blob/main/CHANGELOG.md)) and it should be safe to upgrade. We are doing [a similar upgrade in TFX](https://github.com/tensorflow/tfx/pull/4573).\r\n\r\nIt would be great if you can do a patch release with the fix if possible. Because we are already seeing failures in integration tests that utilizing TF-nightly and kfp.\r\n\r\n### Materials and Reference\r\nN/A\r\n\r\n---\r\n\r\n<!-- Don't delete message below to encourage users to support your issue! -->\r\nImpacted by this bug? Give it a \ud83d\udc4d. We prioritise the issues with the most \ud83d\udc4d.\r\n\n", "before_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... 
Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<=0.11',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n 'google-cloud-storage>=1.20.0,<2',\n 'kubernetes>=8.0.0,<19',\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-auth>=1.6.1,<3',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=2.0.0,<3',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.2,<9',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.13,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n 'pydantic>=1.8.2,<2',\n 'typer>=0.3.2,<1.0',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n 'typing-extensions>=3.7.4,<4;python_version<\"3.9\"',\n]\n\nEXTRAS_REQUIRE = {\n 'all': ['docker'],\n}\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='The Kubeflow Authors',\n url=\"https://github.com/kubeflow/pipelines\",\n project_urls={\n \"Documentation\":\n \"https://kubeflow-pipelines.readthedocs.io/en/stable/\",\n \"Bug Tracker\":\n \"https://github.com/kubeflow/pipelines/issues\",\n \"Source\":\n \"https://github.com/kubeflow/pipelines/tree/master/sdk\",\n \"Changelog\":\n \"https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md\",\n },\n install_requires=REQUIRES,\n extras_require=EXTRAS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.auth',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 'kfp.v2.components.types',\n 'kfp.v2.dsl',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6.1',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 
'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}], "after_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<2',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n 'google-cloud-storage>=1.20.0,<2',\n 'kubernetes>=8.0.0,<19',\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-auth>=1.6.1,<3',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=2.0.0,<3',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.2,<9',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.13,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n 'pydantic>=1.8.2,<2',\n 'typer>=0.3.2,<1.0',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n 'typing-extensions>=3.7.4,<4;python_version<\"3.9\"',\n]\n\nEXTRAS_REQUIRE = {\n 'all': ['docker'],\n}\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='The Kubeflow Authors',\n url=\"https://github.com/kubeflow/pipelines\",\n project_urls={\n \"Documentation\":\n \"https://kubeflow-pipelines.readthedocs.io/en/stable/\",\n \"Bug Tracker\":\n \"https://github.com/kubeflow/pipelines/issues\",\n \"Source\":\n \"https://github.com/kubeflow/pipelines/tree/master/sdk\",\n \"Changelog\":\n \"https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md\",\n },\n install_requires=REQUIRES,\n extras_require=EXTRAS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.auth',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 
'kfp.v2.components.types',\n 'kfp.v2.dsl',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6.1',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}]} | 2,474 | 151 |
gh_patches_debug_1566 | rasdani/github-patches | git_diff | google__mobly-585 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeprecationWarning for not using raw strings
The Windows test run gave some deprecation warnings:
============================== warnings summary ===============================
mobly\logger.py:32
T:\src\github\mobly\mobly\logger.py:32: DeprecationWarning: invalid escape sequence \d
logline_timestamp_re = re.compile('\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d')
tests\mobly\base_test_test.py:152
T:\src\github\mobly\tests\mobly\base_test_test.py:152: DeprecationWarning: invalid escape sequence \*
'naming convention test_\*, abort.')
tests\mobly\base_test_test.py:190
T:\src\github\mobly\tests\mobly\base_test_test.py:190: DeprecationWarning: invalid escape sequence \*
'naming convention test_\*, abort.')
tests\mobly\test_runner_test.py:262
T:\src\github\mobly\tests\mobly\test_runner_test.py:262: DeprecationWarning: invalid escape sequence \.
'TestRunner\._teardown_logger\(\) called before '
tests\mobly\test_runner_test.py:263
T:\src\github\mobly\tests\mobly\test_runner_test.py:263: DeprecationWarning: invalid escape sequence \.
'TestRunner\.setup_logger\(\)!'):
tests\mobly\controllers\android_device_test.py:191
T:\src\github\mobly\tests\mobly\controllers\android_device_test.py:191: DeprecationWarning: invalid escape sequence \[
expected_msg = "More than one device matched: \['0', '0'\]"
tests\mobly\controllers\android_device_lib\adb_test.py:317
T:\src\github\mobly\tests\mobly\controllers\android_device_lib\adb_test.py:317: DeprecationWarning: invalid escape sequence \/
'shell', ['a b', '"blah"', '\/\/'], shell=False)
tests\mobly\controllers\android_device_lib\adb_test.py:318
T:\src\github\mobly\tests\mobly\controllers\android_device_lib\adb_test.py:318: DeprecationWarning: invalid escape sequence \/
self.assertEqual(adb_cmd, ['adb', 'shell', 'a b', '"blah"', "\/\/"])
tests\mobly\controllers\android_device_lib\adb_test.py:358
T:\src\github\mobly\tests\mobly\controllers\android_device_lib\adb_test.py:358: DeprecationWarning: invalid escape sequence \/
'shell', ['a b', '"blah"', '\/\/'], shell=True)
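All of these warnings come from backslash sequences such as `\d`, `\*`, `\.`, `\[` and `\/` inside ordinary string literals; Python 3.6+ flags them as invalid escape sequences when the source is compiled. Switching the affected literals to raw strings keeps the patterns identical while silencing the warning, e.g. for the `logger.py` case:
```python
import re

# Offending form (mobly/logger.py line 32): '\d' in a plain string literal is
# an invalid escape sequence, which triggers the DeprecationWarning above.
#     logline_timestamp_re = re.compile('\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d')

# Raw-string form: the backslashes reach the regex engine unchanged and no
# warning is emitted when the module is compiled.
logline_timestamp_re = re.compile(r'\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d')
```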
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mobly/logger.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import print_function
16
17 import datetime
18 import logging
19 import os
20 import re
21 import sys
22
23 from mobly import records
24 from mobly import utils
25
26 log_line_format = '%(asctime)s.%(msecs).03d %(levelname)s %(message)s'
27 # The micro seconds are added by the format string above,
28 # so the time format does not include ms.
29 log_line_time_format = '%m-%d %H:%M:%S'
30 log_line_timestamp_len = 18
31
32 logline_timestamp_re = re.compile('\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d')
33
34
35 def _parse_logline_timestamp(t):
36 """Parses a logline timestamp into a tuple.
37
38 Args:
39 t: Timestamp in logline format.
40
41 Returns:
42 An iterable of date and time elements in the order of month, day, hour,
43 minute, second, microsecond.
44 """
45 date, time = t.split(' ')
46 month, day = date.split('-')
47 h, m, s = time.split(':')
48 s, ms = s.split('.')
49 return (month, day, h, m, s, ms)
50
51
52 def is_valid_logline_timestamp(timestamp):
53 if len(timestamp) == log_line_timestamp_len:
54 if logline_timestamp_re.match(timestamp):
55 return True
56 return False
57
58
59 def logline_timestamp_comparator(t1, t2):
60 """Comparator for timestamps in logline format.
61
62 Args:
63 t1: Timestamp in logline format.
64 t2: Timestamp in logline format.
65
66 Returns:
67 -1 if t1 < t2; 1 if t1 > t2; 0 if t1 == t2.
68 """
69 dt1 = _parse_logline_timestamp(t1)
70 dt2 = _parse_logline_timestamp(t2)
71 for u1, u2 in zip(dt1, dt2):
72 if u1 < u2:
73 return -1
74 elif u1 > u2:
75 return 1
76 return 0
77
78
79 def _get_timestamp(time_format, delta=None):
80 t = datetime.datetime.now()
81 if delta:
82 t = t + datetime.timedelta(seconds=delta)
83 return t.strftime(time_format)[:-3]
84
85
86 def epoch_to_log_line_timestamp(epoch_time, time_zone=None):
87 """Converts an epoch timestamp in ms to log line timestamp format, which
88 is readible for humans.
89
90 Args:
91 epoch_time: integer, an epoch timestamp in ms.
92 time_zone: instance of tzinfo, time zone information.
93 Using pytz rather than python 3.2 time_zone implementation for
94 python 2 compatibility reasons.
95
96 Returns:
97 A string that is the corresponding timestamp in log line timestamp
98 format.
99 """
100 s, ms = divmod(epoch_time, 1000)
101 d = datetime.datetime.fromtimestamp(s, tz=time_zone)
102 return d.strftime('%m-%d %H:%M:%S.') + str(ms)
103
104
105 def get_log_line_timestamp(delta=None):
106 """Returns a timestamp in the format used by log lines.
107
108 Default is current time. If a delta is set, the return value will be
109 the current time offset by delta seconds.
110
111 Args:
112 delta: Number of seconds to offset from current time; can be negative.
113
114 Returns:
115 A timestamp in log line format with an offset.
116 """
117 return _get_timestamp('%m-%d %H:%M:%S.%f', delta)
118
119
120 def get_log_file_timestamp(delta=None):
121 """Returns a timestamp in the format used for log file names.
122
123 Default is current time. If a delta is set, the return value will be
124 the current time offset by delta seconds.
125
126 Args:
127 delta: Number of seconds to offset from current time; can be negative.
128
129 Returns:
130 A timestamp in log filen name format with an offset.
131 """
132 return _get_timestamp('%m-%d-%Y_%H-%M-%S-%f', delta)
133
134
135 def _setup_test_logger(log_path, prefix=None):
136 """Customizes the root logger for a test run.
137
138 The logger object has a stream handler and a file handler. The stream
139 handler logs INFO level to the terminal, the file handler logs DEBUG
140 level to files.
141
142 Args:
143 log_path: Location of the log file.
144 prefix: A prefix for each log line in terminal.
145 filename: Name of the log file. The default is the time the logger
146 is requested.
147 """
148 log = logging.getLogger()
149 kill_test_logger(log)
150 log.propagate = False
151 log.setLevel(logging.DEBUG)
152 # Log info to stream
153 terminal_format = log_line_format
154 if prefix:
155 terminal_format = '[%s] %s' % (prefix, log_line_format)
156 c_formatter = logging.Formatter(terminal_format, log_line_time_format)
157 ch = logging.StreamHandler(sys.stdout)
158 ch.setFormatter(c_formatter)
159 ch.setLevel(logging.INFO)
160 # Log everything to file
161 f_formatter = logging.Formatter(log_line_format, log_line_time_format)
162 # Write logger output to files
163 fh_info = logging.FileHandler(
164 os.path.join(log_path, records.OUTPUT_FILE_INFO_LOG))
165 fh_info.setFormatter(f_formatter)
166 fh_info.setLevel(logging.INFO)
167 fh_debug = logging.FileHandler(
168 os.path.join(log_path, records.OUTPUT_FILE_DEBUG_LOG))
169 fh_debug.setFormatter(f_formatter)
170 fh_debug.setLevel(logging.DEBUG)
171 log.addHandler(ch)
172 log.addHandler(fh_info)
173 log.addHandler(fh_debug)
174 log.log_path = log_path
175 logging.log_path = log_path
176
177
178 def kill_test_logger(logger):
179 """Cleans up a test logger object by removing all of its handlers.
180
181 Args:
182 logger: The logging object to clean up.
183 """
184 for h in list(logger.handlers):
185 logger.removeHandler(h)
186 if isinstance(h, logging.FileHandler):
187 h.close()
188
189
190 def create_latest_log_alias(actual_path):
191 """Creates a symlink to the latest test run logs.
192
193 Args:
194 actual_path: The source directory where the latest test run's logs are.
195 """
196 alias_path = os.path.join(os.path.dirname(actual_path), 'latest')
197 utils.create_alias(actual_path, alias_path)
198
199
200 def setup_test_logger(log_path, prefix=None, filename=None):
201 """Customizes the root logger for a test run.
202
203 Args:
204 log_path: Location of the report file.
205 prefix: A prefix for each log line in terminal.
206 filename: Name of the files. The default is the time the objects
207 are requested.
208 """
209 utils.create_dir(log_path)
210 _setup_test_logger(log_path, prefix)
211 logging.info('Test output folder: "%s"', log_path)
212 create_latest_log_alias(log_path)
213
214
215 def normalize_log_line_timestamp(log_line_timestamp):
216 """Replace special characters in log line timestamp with normal characters.
217
218 Args:
219 log_line_timestamp: A string in the log line timestamp format. Obtained
220 with get_log_line_timestamp.
221
222 Returns:
223 A string representing the same time as input timestamp, but without
224 special characters.
225 """
226 norm_tp = log_line_timestamp.replace(' ', '_')
227 norm_tp = norm_tp.replace(':', '-')
228 return norm_tp
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mobly/logger.py b/mobly/logger.py
--- a/mobly/logger.py
+++ b/mobly/logger.py
@@ -29,7 +29,7 @@
log_line_time_format = '%m-%d %H:%M:%S'
log_line_timestamp_len = 18
-logline_timestamp_re = re.compile('\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d')
+logline_timestamp_re = re.compile(r'\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d')
def _parse_logline_timestamp(t):
| {"golden_diff": "diff --git a/mobly/logger.py b/mobly/logger.py\n--- a/mobly/logger.py\n+++ b/mobly/logger.py\n@@ -29,7 +29,7 @@\n log_line_time_format = '%m-%d %H:%M:%S'\n log_line_timestamp_len = 18\n \n-logline_timestamp_re = re.compile('\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d.\\d\\d\\d')\n+logline_timestamp_re = re.compile(r'\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d.\\d\\d\\d')\n \n \n def _parse_logline_timestamp(t):\n", "issue": "DeprecationWarning for not using raw strings\nThe Windows test run gave some deprecation warnings:\r\n\r\n============================== warnings summary ===============================\r\nmobly\\logger.py:32\r\n T:\\src\\github\\mobly\\mobly\\logger.py:32: DeprecationWarning: invalid escape sequence \\d\r\n logline_timestamp_re = re.compile('\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d.\\d\\d\\d')\r\n\r\ntests\\mobly\\base_test_test.py:152\r\n T:\\src\\github\\mobly\\tests\\mobly\\base_test_test.py:152: DeprecationWarning: invalid escape sequence \\*\r\n 'naming convention test_\\*, abort.')\r\n\r\ntests\\mobly\\base_test_test.py:190\r\n T:\\src\\github\\mobly\\tests\\mobly\\base_test_test.py:190: DeprecationWarning: invalid escape sequence \\*\r\n 'naming convention test_\\*, abort.')\r\n\r\ntests\\mobly\\test_runner_test.py:262\r\n T:\\src\\github\\mobly\\tests\\mobly\\test_runner_test.py:262: DeprecationWarning: invalid escape sequence \\.\r\n 'TestRunner\\._teardown_logger\\(\\) called before '\r\n\r\ntests\\mobly\\test_runner_test.py:263\r\n T:\\src\\github\\mobly\\tests\\mobly\\test_runner_test.py:263: DeprecationWarning: invalid escape sequence \\.\r\n 'TestRunner\\.setup_logger\\(\\)!'):\r\n\r\ntests\\mobly\\controllers\\android_device_test.py:191\r\n T:\\src\\github\\mobly\\tests\\mobly\\controllers\\android_device_test.py:191: DeprecationWarning: invalid escape sequence \\[\r\n expected_msg = \"More than one device matched: \\['0', '0'\\]\"\r\n\r\ntests\\mobly\\controllers\\android_device_lib\\adb_test.py:317\r\n T:\\src\\github\\mobly\\tests\\mobly\\controllers\\android_device_lib\\adb_test.py:317: DeprecationWarning: invalid escape sequence \\/\r\n 'shell', ['a b', '\"blah\"', '\\/\\/'], shell=False)\r\n\r\ntests\\mobly\\controllers\\android_device_lib\\adb_test.py:318\r\n T:\\src\\github\\mobly\\tests\\mobly\\controllers\\android_device_lib\\adb_test.py:318: DeprecationWarning: invalid escape sequence \\/\r\n self.assertEqual(adb_cmd, ['adb', 'shell', 'a b', '\"blah\"', \"\\/\\/\"])\r\n\r\ntests\\mobly\\controllers\\android_device_lib\\adb_test.py:358\r\n T:\\src\\github\\mobly\\tests\\mobly\\controllers\\android_device_lib\\adb_test.py:358: DeprecationWarning: invalid escape sequence \\/\r\n 'shell', ['a b', '\"blah\"', '\\/\\/'], shell=True)\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport datetime\nimport logging\nimport os\nimport re\nimport sys\n\nfrom mobly import records\nfrom mobly import utils\n\nlog_line_format = '%(asctime)s.%(msecs).03d 
%(levelname)s %(message)s'\n# The micro seconds are added by the format string above,\n# so the time format does not include ms.\nlog_line_time_format = '%m-%d %H:%M:%S'\nlog_line_timestamp_len = 18\n\nlogline_timestamp_re = re.compile('\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d.\\d\\d\\d')\n\n\ndef _parse_logline_timestamp(t):\n \"\"\"Parses a logline timestamp into a tuple.\n\n Args:\n t: Timestamp in logline format.\n\n Returns:\n An iterable of date and time elements in the order of month, day, hour,\n minute, second, microsecond.\n \"\"\"\n date, time = t.split(' ')\n month, day = date.split('-')\n h, m, s = time.split(':')\n s, ms = s.split('.')\n return (month, day, h, m, s, ms)\n\n\ndef is_valid_logline_timestamp(timestamp):\n if len(timestamp) == log_line_timestamp_len:\n if logline_timestamp_re.match(timestamp):\n return True\n return False\n\n\ndef logline_timestamp_comparator(t1, t2):\n \"\"\"Comparator for timestamps in logline format.\n\n Args:\n t1: Timestamp in logline format.\n t2: Timestamp in logline format.\n\n Returns:\n -1 if t1 < t2; 1 if t1 > t2; 0 if t1 == t2.\n \"\"\"\n dt1 = _parse_logline_timestamp(t1)\n dt2 = _parse_logline_timestamp(t2)\n for u1, u2 in zip(dt1, dt2):\n if u1 < u2:\n return -1\n elif u1 > u2:\n return 1\n return 0\n\n\ndef _get_timestamp(time_format, delta=None):\n t = datetime.datetime.now()\n if delta:\n t = t + datetime.timedelta(seconds=delta)\n return t.strftime(time_format)[:-3]\n\n\ndef epoch_to_log_line_timestamp(epoch_time, time_zone=None):\n \"\"\"Converts an epoch timestamp in ms to log line timestamp format, which\n is readible for humans.\n\n Args:\n epoch_time: integer, an epoch timestamp in ms.\n time_zone: instance of tzinfo, time zone information.\n Using pytz rather than python 3.2 time_zone implementation for\n python 2 compatibility reasons.\n\n Returns:\n A string that is the corresponding timestamp in log line timestamp\n format.\n \"\"\"\n s, ms = divmod(epoch_time, 1000)\n d = datetime.datetime.fromtimestamp(s, tz=time_zone)\n return d.strftime('%m-%d %H:%M:%S.') + str(ms)\n\n\ndef get_log_line_timestamp(delta=None):\n \"\"\"Returns a timestamp in the format used by log lines.\n\n Default is current time. If a delta is set, the return value will be\n the current time offset by delta seconds.\n\n Args:\n delta: Number of seconds to offset from current time; can be negative.\n\n Returns:\n A timestamp in log line format with an offset.\n \"\"\"\n return _get_timestamp('%m-%d %H:%M:%S.%f', delta)\n\n\ndef get_log_file_timestamp(delta=None):\n \"\"\"Returns a timestamp in the format used for log file names.\n\n Default is current time. If a delta is set, the return value will be\n the current time offset by delta seconds.\n\n Args:\n delta: Number of seconds to offset from current time; can be negative.\n\n Returns:\n A timestamp in log filen name format with an offset.\n \"\"\"\n return _get_timestamp('%m-%d-%Y_%H-%M-%S-%f', delta)\n\n\ndef _setup_test_logger(log_path, prefix=None):\n \"\"\"Customizes the root logger for a test run.\n\n The logger object has a stream handler and a file handler. The stream\n handler logs INFO level to the terminal, the file handler logs DEBUG\n level to files.\n\n Args:\n log_path: Location of the log file.\n prefix: A prefix for each log line in terminal.\n filename: Name of the log file. 
The default is the time the logger\n is requested.\n \"\"\"\n log = logging.getLogger()\n kill_test_logger(log)\n log.propagate = False\n log.setLevel(logging.DEBUG)\n # Log info to stream\n terminal_format = log_line_format\n if prefix:\n terminal_format = '[%s] %s' % (prefix, log_line_format)\n c_formatter = logging.Formatter(terminal_format, log_line_time_format)\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(c_formatter)\n ch.setLevel(logging.INFO)\n # Log everything to file\n f_formatter = logging.Formatter(log_line_format, log_line_time_format)\n # Write logger output to files\n fh_info = logging.FileHandler(\n os.path.join(log_path, records.OUTPUT_FILE_INFO_LOG))\n fh_info.setFormatter(f_formatter)\n fh_info.setLevel(logging.INFO)\n fh_debug = logging.FileHandler(\n os.path.join(log_path, records.OUTPUT_FILE_DEBUG_LOG))\n fh_debug.setFormatter(f_formatter)\n fh_debug.setLevel(logging.DEBUG)\n log.addHandler(ch)\n log.addHandler(fh_info)\n log.addHandler(fh_debug)\n log.log_path = log_path\n logging.log_path = log_path\n\n\ndef kill_test_logger(logger):\n \"\"\"Cleans up a test logger object by removing all of its handlers.\n\n Args:\n logger: The logging object to clean up.\n \"\"\"\n for h in list(logger.handlers):\n logger.removeHandler(h)\n if isinstance(h, logging.FileHandler):\n h.close()\n\n\ndef create_latest_log_alias(actual_path):\n \"\"\"Creates a symlink to the latest test run logs.\n\n Args:\n actual_path: The source directory where the latest test run's logs are.\n \"\"\"\n alias_path = os.path.join(os.path.dirname(actual_path), 'latest')\n utils.create_alias(actual_path, alias_path)\n\n\ndef setup_test_logger(log_path, prefix=None, filename=None):\n \"\"\"Customizes the root logger for a test run.\n\n Args:\n log_path: Location of the report file.\n prefix: A prefix for each log line in terminal.\n filename: Name of the files. The default is the time the objects\n are requested.\n \"\"\"\n utils.create_dir(log_path)\n _setup_test_logger(log_path, prefix)\n logging.info('Test output folder: \"%s\"', log_path)\n create_latest_log_alias(log_path)\n\n\ndef normalize_log_line_timestamp(log_line_timestamp):\n \"\"\"Replace special characters in log line timestamp with normal characters.\n\n Args:\n log_line_timestamp: A string in the log line timestamp format. 
Obtained\n with get_log_line_timestamp.\n\n Returns:\n A string representing the same time as input timestamp, but without\n special characters.\n \"\"\"\n norm_tp = log_line_timestamp.replace(' ', '_')\n norm_tp = norm_tp.replace(':', '-')\n return norm_tp\n", "path": "mobly/logger.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport datetime\nimport logging\nimport os\nimport re\nimport sys\n\nfrom mobly import records\nfrom mobly import utils\n\nlog_line_format = '%(asctime)s.%(msecs).03d %(levelname)s %(message)s'\n# The micro seconds are added by the format string above,\n# so the time format does not include ms.\nlog_line_time_format = '%m-%d %H:%M:%S'\nlog_line_timestamp_len = 18\n\nlogline_timestamp_re = re.compile(r'\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d.\\d\\d\\d')\n\n\ndef _parse_logline_timestamp(t):\n \"\"\"Parses a logline timestamp into a tuple.\n\n Args:\n t: Timestamp in logline format.\n\n Returns:\n An iterable of date and time elements in the order of month, day, hour,\n minute, second, microsecond.\n \"\"\"\n date, time = t.split(' ')\n month, day = date.split('-')\n h, m, s = time.split(':')\n s, ms = s.split('.')\n return (month, day, h, m, s, ms)\n\n\ndef is_valid_logline_timestamp(timestamp):\n if len(timestamp) == log_line_timestamp_len:\n if logline_timestamp_re.match(timestamp):\n return True\n return False\n\n\ndef logline_timestamp_comparator(t1, t2):\n \"\"\"Comparator for timestamps in logline format.\n\n Args:\n t1: Timestamp in logline format.\n t2: Timestamp in logline format.\n\n Returns:\n -1 if t1 < t2; 1 if t1 > t2; 0 if t1 == t2.\n \"\"\"\n dt1 = _parse_logline_timestamp(t1)\n dt2 = _parse_logline_timestamp(t2)\n for u1, u2 in zip(dt1, dt2):\n if u1 < u2:\n return -1\n elif u1 > u2:\n return 1\n return 0\n\n\ndef _get_timestamp(time_format, delta=None):\n t = datetime.datetime.now()\n if delta:\n t = t + datetime.timedelta(seconds=delta)\n return t.strftime(time_format)[:-3]\n\n\ndef epoch_to_log_line_timestamp(epoch_time, time_zone=None):\n \"\"\"Converts an epoch timestamp in ms to log line timestamp format, which\n is readible for humans.\n\n Args:\n epoch_time: integer, an epoch timestamp in ms.\n time_zone: instance of tzinfo, time zone information.\n Using pytz rather than python 3.2 time_zone implementation for\n python 2 compatibility reasons.\n\n Returns:\n A string that is the corresponding timestamp in log line timestamp\n format.\n \"\"\"\n s, ms = divmod(epoch_time, 1000)\n d = datetime.datetime.fromtimestamp(s, tz=time_zone)\n return d.strftime('%m-%d %H:%M:%S.') + str(ms)\n\n\ndef get_log_line_timestamp(delta=None):\n \"\"\"Returns a timestamp in the format used by log lines.\n\n Default is current time. 
If a delta is set, the return value will be\n the current time offset by delta seconds.\n\n Args:\n delta: Number of seconds to offset from current time; can be negative.\n\n Returns:\n A timestamp in log line format with an offset.\n \"\"\"\n return _get_timestamp('%m-%d %H:%M:%S.%f', delta)\n\n\ndef get_log_file_timestamp(delta=None):\n \"\"\"Returns a timestamp in the format used for log file names.\n\n Default is current time. If a delta is set, the return value will be\n the current time offset by delta seconds.\n\n Args:\n delta: Number of seconds to offset from current time; can be negative.\n\n Returns:\n A timestamp in log filen name format with an offset.\n \"\"\"\n return _get_timestamp('%m-%d-%Y_%H-%M-%S-%f', delta)\n\n\ndef _setup_test_logger(log_path, prefix=None):\n \"\"\"Customizes the root logger for a test run.\n\n The logger object has a stream handler and a file handler. The stream\n handler logs INFO level to the terminal, the file handler logs DEBUG\n level to files.\n\n Args:\n log_path: Location of the log file.\n prefix: A prefix for each log line in terminal.\n filename: Name of the log file. The default is the time the logger\n is requested.\n \"\"\"\n log = logging.getLogger()\n kill_test_logger(log)\n log.propagate = False\n log.setLevel(logging.DEBUG)\n # Log info to stream\n terminal_format = log_line_format\n if prefix:\n terminal_format = '[%s] %s' % (prefix, log_line_format)\n c_formatter = logging.Formatter(terminal_format, log_line_time_format)\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(c_formatter)\n ch.setLevel(logging.INFO)\n # Log everything to file\n f_formatter = logging.Formatter(log_line_format, log_line_time_format)\n # Write logger output to files\n fh_info = logging.FileHandler(\n os.path.join(log_path, records.OUTPUT_FILE_INFO_LOG))\n fh_info.setFormatter(f_formatter)\n fh_info.setLevel(logging.INFO)\n fh_debug = logging.FileHandler(\n os.path.join(log_path, records.OUTPUT_FILE_DEBUG_LOG))\n fh_debug.setFormatter(f_formatter)\n fh_debug.setLevel(logging.DEBUG)\n log.addHandler(ch)\n log.addHandler(fh_info)\n log.addHandler(fh_debug)\n log.log_path = log_path\n logging.log_path = log_path\n\n\ndef kill_test_logger(logger):\n \"\"\"Cleans up a test logger object by removing all of its handlers.\n\n Args:\n logger: The logging object to clean up.\n \"\"\"\n for h in list(logger.handlers):\n logger.removeHandler(h)\n if isinstance(h, logging.FileHandler):\n h.close()\n\n\ndef create_latest_log_alias(actual_path):\n \"\"\"Creates a symlink to the latest test run logs.\n\n Args:\n actual_path: The source directory where the latest test run's logs are.\n \"\"\"\n alias_path = os.path.join(os.path.dirname(actual_path), 'latest')\n utils.create_alias(actual_path, alias_path)\n\n\ndef setup_test_logger(log_path, prefix=None, filename=None):\n \"\"\"Customizes the root logger for a test run.\n\n Args:\n log_path: Location of the report file.\n prefix: A prefix for each log line in terminal.\n filename: Name of the files. The default is the time the objects\n are requested.\n \"\"\"\n utils.create_dir(log_path)\n _setup_test_logger(log_path, prefix)\n logging.info('Test output folder: \"%s\"', log_path)\n create_latest_log_alias(log_path)\n\n\ndef normalize_log_line_timestamp(log_line_timestamp):\n \"\"\"Replace special characters in log line timestamp with normal characters.\n\n Args:\n log_line_timestamp: A string in the log line timestamp format. 
Obtained\n with get_log_line_timestamp.\n\n Returns:\n A string representing the same time as input timestamp, but without\n special characters.\n \"\"\"\n norm_tp = log_line_timestamp.replace(' ', '_')\n norm_tp = norm_tp.replace(':', '-')\n return norm_tp\n", "path": "mobly/logger.py"}]} | 3,171 | 134 |
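As a minimal, illustrative sketch of the raw-string fix applied in the golden diff above (the sample timestamp below is an assumption, chosen only to show that the pattern still matches after the change):

```python
import re

# The non-raw pattern ('\d...') emits "DeprecationWarning: invalid escape
# sequence \d" on newer Python versions; the raw-string form is the fix.
logline_timestamp_re = re.compile(r'\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d')

# Assumed sample value in mobly's log-line timestamp format.
assert logline_timestamp_re.match('06-10 09:56:42.123') is not None
```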
gh_patches_debug_1088 | rasdani/github-patches | git_diff | doccano__doccano-1531 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TemplateDoesNotExist Error on start from README instructions
How to reproduce the behaviour
---------
I was following the instructions on the main README to install and start doccano with pip (copied here)
```
pip install doccano
doccano init
doccano createuser --username admin --password pass
doccano webserver --port 8000
```
and then in another terminal
```
doccano task
```
This all looks fine, until I try the next step (visiting in the browser). I get the following error (included the last line of the non-error log for reference)
```
[2021-06-10 09:56:42 -0700] [1046] [INFO] Handling signal: winch
Internal Server Error: /
Traceback (most recent call last):
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/base.py", line 204, in _get_response
response = response.render()
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 105, in render
self.content = self.rendered_content
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 81, in rendered_content
template = self.resolve_template(self.template_name)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 63, in resolve_template
return select_template(template, using=self.using)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/loader.py", line 47, in select_template
raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain)
django.template.exceptions.TemplateDoesNotExist: index.html
Internal Server Error: /favicon.ico
Traceback (most recent call last):
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/base.py", line 204, in _get_response
response = response.render()
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 105, in render
self.content = self.rendered_content
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 81, in rendered_content
template = self.resolve_template(self.template_name)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 63, in resolve_template
return select_template(template, using=self.using)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/loader.py", line 47, in select_template
raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain)
django.template.exceptions.TemplateDoesNotExist: index.html
```
Your Environment
---------
* Operating System: centos07
* Python Version Used: 3.7.2 (virtual environment)
* When you install doccano: 2021-Jun-10 (Today)
* How did you install doccano (Heroku button etc): pip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/cli.py`
Content:
```
1 import argparse
2 import multiprocessing
3 import os
4 import platform
5 import subprocess
6 import sys
7
8 from .app.celery import app
9 base = os.path.abspath(os.path.dirname(__file__))
10 sys.path.append(base)
11 manage_path = os.path.join(base, 'manage.py')
12 parser = argparse.ArgumentParser(description='doccano, text annotation for machine learning practitioners.')
13
14
15 def number_of_workers():
16 return (multiprocessing.cpu_count() * 2) + 1
17
18
19 def run_on_nix(args):
20 import gunicorn.app.base
21 import gunicorn.util
22
23 class StandaloneApplication(gunicorn.app.base.BaseApplication):
24
25 def __init__(self, options=None):
26 self.options = options or {}
27 super().__init__()
28
29 def load_config(self):
30 config = {key: value for key, value in self.options.items()
31 if key in self.cfg.settings and value is not None}
32 for key, value in config.items():
33 self.cfg.set(key.lower(), value)
34
35 def load(self):
36 return gunicorn.util.import_app('app.wsgi')
37
38 options = {
39 'bind': '%s:%s' % ('0.0.0.0', args.port),
40 'workers': number_of_workers(),
41 'chdir': base
42 }
43 StandaloneApplication(options).run()
44
45
46 def run_on_windows(args):
47 from waitress import serve
48 from app.wsgi import application
49 serve(application, port=args.port)
50
51
52 def command_db_init(args):
53 print('Setup Database.')
54 subprocess.call([sys.executable, manage_path, 'wait_for_db'], shell=False)
55 subprocess.call([sys.executable, manage_path, 'migrate'], shell=False)
56 subprocess.call([sys.executable, manage_path, 'create_roles'], shell=False)
57
58
59 def command_user_create(args):
60 print('Create admin user.')
61 subprocess.call([sys.executable, manage_path, 'create_admin',
62 '--username', args.username,
63 '--password', args.password,
64 '--email', args.email,
65 '--noinput'], shell=False)
66
67
68 def command_run_webserver(args):
69 print(f'Starting server with port {args.port}.')
70 if platform.system() == 'Windows':
71 run_on_windows(args)
72 else:
73 run_on_nix(args)
74
75
76 def command_run_task_queue(args):
77 print('Starting task queue.')
78 app.worker_main(
79 argv=[
80 '--app=app',
81 '--workdir={}'.format(base),
82 'worker',
83 '--loglevel=info',
84 '--concurrency={}'.format(args.concurrency),
85 ]
86 )
87
88
89 def command_help(args):
90 print(parser.parse_args([args.command, '--help']))
91
92
93 def main():
94 # Create a command line parser.
95 subparsers = parser.add_subparsers()
96
97 # Create a parser for db initialization.
98 parser_init = subparsers.add_parser('init', help='see `init -h`')
99
100 parser_init.set_defaults(handler=command_db_init)
101
102 # Create a parser for user creation.
103 parser_create_user = subparsers.add_parser('createuser', help='see `createuser -h`')
104 parser_create_user.add_argument('--username', type=str, default='admin', help='admin username')
105 parser_create_user.add_argument('--password', type=str, default='password', help='admin password')
106 parser_create_user.add_argument('--email', type=str, default='[email protected]', help='admin email')
107 parser_create_user.set_defaults(handler=command_user_create)
108
109 # Create a parser for web server.
110 parser_server = subparsers.add_parser('webserver', help='see `webserver -h`')
111 parser_server.add_argument('--port', type=int, default=8000, help='port number')
112 parser_server.set_defaults(handler=command_run_webserver)
113
114 # Create a parser for task queue.
115 parser_queue = subparsers.add_parser('task', help='see `task -h`')
116 parser_queue.add_argument('--concurrency', type=int, default=2, help='concurrency')
117 parser_queue.set_defaults(handler=command_run_task_queue)
118
119 # Create a parser for help.
120 parser_help = subparsers.add_parser('help', help='see `help -h`')
121 parser_help.add_argument('command', help='command name which help is shown')
122 parser_help.set_defaults(handler=command_help)
123
124 # Dispatch handler.
125 args = parser.parse_args()
126 if hasattr(args, 'handler'):
127 args.handler(args)
128 else:
129 # If specified unknown command, show help.
130 parser.print_help()
131
132
133 if __name__ == '__main__':
134 main()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -6,6 +6,7 @@
import sys
from .app.celery import app
+os.environ['DEBUG'] = 'False'
base = os.path.abspath(os.path.dirname(__file__))
sys.path.append(base)
manage_path = os.path.join(base, 'manage.py')
| {"golden_diff": "diff --git a/backend/cli.py b/backend/cli.py\n--- a/backend/cli.py\n+++ b/backend/cli.py\n@@ -6,6 +6,7 @@\n import sys\n \n from .app.celery import app\n+os.environ['DEBUG'] = 'False'\n base = os.path.abspath(os.path.dirname(__file__))\n sys.path.append(base)\n manage_path = os.path.join(base, 'manage.py')\n", "issue": "TemplateDoesNotExist Error on start from README instructions\nHow to reproduce the behaviour\r\n---------\r\n\r\nI was following the instructions on the main README to install and start doccano with pip (copied here)\r\n\r\n```\r\npip install doccano\r\ndoccano init\r\ndoccano createuser --username admin --password pass\r\ndoccano webserver --port 8000\r\n```\r\n\r\nand then in another terminal\r\n\r\n```\r\ndoccano task\r\n```\r\n\r\nThis all looks fine, until I try the next step (visiting in the browser). I get the following error (included the last line of the non-error log for reference)\r\n\r\n```\r\n[2021-06-10 09:56:42 -0700] [1046] [INFO] Handling signal: winch\r\nInternal Server Error: /\r\nTraceback (most recent call last):\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/base.py\", line 204, in _get_response\r\n response = response.render()\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py\", line 105, in render\r\n self.content = self.rendered_content\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py\", line 81, in rendered_content\r\n template = self.resolve_template(self.template_name)\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py\", line 63, in resolve_template\r\n return select_template(template, using=self.using)\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/loader.py\", line 47, in select_template\r\n raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain)\r\ndjango.template.exceptions.TemplateDoesNotExist: index.html\r\nInternal Server Error: /favicon.ico\r\nTraceback (most recent call last):\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/base.py\", line 204, in _get_response\r\n response = response.render()\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py\", line 105, in render\r\n self.content = self.rendered_content\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py\", line 81, in rendered_content\r\n template = self.resolve_template(self.template_name)\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py\", line 63, in resolve_template\r\n return select_template(template, using=self.using)\r\n File \"/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/loader.py\", line 47, in select_template\r\n raise TemplateDoesNotExist(', '.join(template_name_list), 
chain=chain)\r\ndjango.template.exceptions.TemplateDoesNotExist: index.html\r\n```\r\n\r\nYour Environment\r\n---------\r\n* Operating System: centos07\r\n* Python Version Used: 3.7.2 (virtual environment)\r\n* When you install doccano: 2021-Jun-10 (Today)\r\n* How did you install doccano (Heroku button etc): pip\r\n\n", "before_files": [{"content": "import argparse\nimport multiprocessing\nimport os\nimport platform\nimport subprocess\nimport sys\n\nfrom .app.celery import app\nbase = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(base)\nmanage_path = os.path.join(base, 'manage.py')\nparser = argparse.ArgumentParser(description='doccano, text annotation for machine learning practitioners.')\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\ndef run_on_nix(args):\n import gunicorn.app.base\n import gunicorn.util\n\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n\n def __init__(self, options=None):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n config = {key: value for key, value in self.options.items()\n if key in self.cfg.settings and value is not None}\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return gunicorn.util.import_app('app.wsgi')\n\n options = {\n 'bind': '%s:%s' % ('0.0.0.0', args.port),\n 'workers': number_of_workers(),\n 'chdir': base\n }\n StandaloneApplication(options).run()\n\n\ndef run_on_windows(args):\n from waitress import serve\n from app.wsgi import application\n serve(application, port=args.port)\n\n\ndef command_db_init(args):\n print('Setup Database.')\n subprocess.call([sys.executable, manage_path, 'wait_for_db'], shell=False)\n subprocess.call([sys.executable, manage_path, 'migrate'], shell=False)\n subprocess.call([sys.executable, manage_path, 'create_roles'], shell=False)\n\n\ndef command_user_create(args):\n print('Create admin user.')\n subprocess.call([sys.executable, manage_path, 'create_admin',\n '--username', args.username,\n '--password', args.password,\n '--email', args.email,\n '--noinput'], shell=False)\n\n\ndef command_run_webserver(args):\n print(f'Starting server with port {args.port}.')\n if platform.system() == 'Windows':\n run_on_windows(args)\n else:\n run_on_nix(args)\n\n\ndef command_run_task_queue(args):\n print('Starting task queue.')\n app.worker_main(\n argv=[\n '--app=app',\n '--workdir={}'.format(base),\n 'worker',\n '--loglevel=info',\n '--concurrency={}'.format(args.concurrency),\n ]\n )\n\n\ndef command_help(args):\n print(parser.parse_args([args.command, '--help']))\n\n\ndef main():\n # Create a command line parser.\n subparsers = parser.add_subparsers()\n\n # Create a parser for db initialization.\n parser_init = subparsers.add_parser('init', help='see `init -h`')\n\n parser_init.set_defaults(handler=command_db_init)\n\n # Create a parser for user creation.\n parser_create_user = subparsers.add_parser('createuser', help='see `createuser -h`')\n parser_create_user.add_argument('--username', type=str, default='admin', help='admin username')\n parser_create_user.add_argument('--password', type=str, default='password', help='admin password')\n parser_create_user.add_argument('--email', type=str, default='[email protected]', help='admin email')\n parser_create_user.set_defaults(handler=command_user_create)\n\n # Create a parser for web server.\n parser_server = subparsers.add_parser('webserver', help='see `webserver -h`')\n parser_server.add_argument('--port', type=int, default=8000, 
help='port number')\n parser_server.set_defaults(handler=command_run_webserver)\n\n # Create a parser for task queue.\n parser_queue = subparsers.add_parser('task', help='see `task -h`')\n parser_queue.add_argument('--concurrency', type=int, default=2, help='concurrency')\n parser_queue.set_defaults(handler=command_run_task_queue)\n\n # Create a parser for help.\n parser_help = subparsers.add_parser('help', help='see `help -h`')\n parser_help.add_argument('command', help='command name which help is shown')\n parser_help.set_defaults(handler=command_help)\n\n # Dispatch handler.\n args = parser.parse_args()\n if hasattr(args, 'handler'):\n args.handler(args)\n else:\n # If specified unknown command, show help.\n parser.print_help()\n\n\nif __name__ == '__main__':\n main()\n", "path": "backend/cli.py"}], "after_files": [{"content": "import argparse\nimport multiprocessing\nimport os\nimport platform\nimport subprocess\nimport sys\n\nfrom .app.celery import app\nos.environ['DEBUG'] = 'False'\nbase = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(base)\nmanage_path = os.path.join(base, 'manage.py')\nparser = argparse.ArgumentParser(description='doccano, text annotation for machine learning practitioners.')\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\ndef run_on_nix(args):\n import gunicorn.app.base\n import gunicorn.util\n\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n\n def __init__(self, options=None):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n config = {key: value for key, value in self.options.items()\n if key in self.cfg.settings and value is not None}\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return gunicorn.util.import_app('app.wsgi')\n\n options = {\n 'bind': '%s:%s' % ('0.0.0.0', args.port),\n 'workers': number_of_workers(),\n 'chdir': base\n }\n StandaloneApplication(options).run()\n\n\ndef run_on_windows(args):\n from waitress import serve\n from app.wsgi import application\n serve(application, port=args.port)\n\n\ndef command_db_init(args):\n print('Setup Database.')\n subprocess.call([sys.executable, manage_path, 'wait_for_db'], shell=False)\n subprocess.call([sys.executable, manage_path, 'migrate'], shell=False)\n subprocess.call([sys.executable, manage_path, 'create_roles'], shell=False)\n\n\ndef command_user_create(args):\n print('Create admin user.')\n subprocess.call([sys.executable, manage_path, 'create_admin',\n '--username', args.username,\n '--password', args.password,\n '--email', args.email,\n '--noinput'], shell=False)\n\n\ndef command_run_webserver(args):\n print(f'Starting server with port {args.port}.')\n if platform.system() == 'Windows':\n run_on_windows(args)\n else:\n run_on_nix(args)\n\n\ndef command_run_task_queue(args):\n print('Starting task queue.')\n app.worker_main(\n argv=[\n '--app=app',\n '--workdir={}'.format(base),\n 'worker',\n '--loglevel=info',\n '--concurrency={}'.format(args.concurrency),\n ]\n )\n\n\ndef command_help(args):\n print(parser.parse_args([args.command, '--help']))\n\n\ndef main():\n # Create a command line parser.\n subparsers = parser.add_subparsers()\n\n # Create a parser for db initialization.\n parser_init = subparsers.add_parser('init', help='see `init -h`')\n\n parser_init.set_defaults(handler=command_db_init)\n\n # Create a parser for user creation.\n parser_create_user = subparsers.add_parser('createuser', help='see `createuser -h`')\n 
parser_create_user.add_argument('--username', type=str, default='admin', help='admin username')\n parser_create_user.add_argument('--password', type=str, default='password', help='admin password')\n parser_create_user.add_argument('--email', type=str, default='[email protected]', help='admin email')\n parser_create_user.set_defaults(handler=command_user_create)\n\n # Create a parser for web server.\n parser_server = subparsers.add_parser('webserver', help='see `webserver -h`')\n parser_server.add_argument('--port', type=int, default=8000, help='port number')\n parser_server.set_defaults(handler=command_run_webserver)\n\n # Create a parser for task queue.\n parser_queue = subparsers.add_parser('task', help='see `task -h`')\n parser_queue.add_argument('--concurrency', type=int, default=2, help='concurrency')\n parser_queue.set_defaults(handler=command_run_task_queue)\n\n # Create a parser for help.\n parser_help = subparsers.add_parser('help', help='see `help -h`')\n parser_help.add_argument('command', help='command name which help is shown')\n parser_help.set_defaults(handler=command_help)\n\n # Dispatch handler.\n args = parser.parse_args()\n if hasattr(args, 'handler'):\n args.handler(args)\n else:\n # If specified unknown command, show help.\n parser.print_help()\n\n\nif __name__ == '__main__':\n main()\n", "path": "backend/cli.py"}]} | 2,442 | 84 |
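A minimal sketch of how the one-line fix in the golden diff above is intended to behave; the explanation of *why* is inferred from the issue, and `app.wsgi` is simply the module already imported in `backend/cli.py`:

```python
import os

# Force production settings before the Django application is imported,
# presumably so the pip-packaged build resolves the prebuilt index.html
# template instead of failing with TemplateDoesNotExist.
os.environ['DEBUG'] = 'False'

from app.wsgi import application  # noqa: E402  (import after the env tweak)
```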
gh_patches_debug_34797 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-657 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
precommit flake8 check is missing files
elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py is missed by the checker.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/data/codec/__init__.py`
Content:
```
1 from elasticdl.python.data.codec.bytes_codec import BytesCodec
2 from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec
3
4 __all__ = [BytesCodec, TFExampleCodec]
5
```
Path: `elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py`
Content:
```
1 import argparse
2 import os
3
4 from pyspark import SparkContext
5 from pyspark import TaskContext
6
7 import numpy as np
8 from elasticdl.python.elasticdl.common.model_helper import load_user_model
9 from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \
10 convert_numpy_to_recordio
11
12
13 def process_data(
14 feature_label_columns,
15 single_file_preparation_func,
16 output_dir,
17 records_per_file,
18 codec_type,
19 ):
20 def _process_data(file_list):
21 ctx = TaskContext()
22 feature_list = []
23 label_list = []
24 for file in file_list:
25 feature_label_tuple = single_file_preparation_func(file)
26 assert len(feature_label_tuple) == 2
27 feature_list.append(feature_label_tuple[0])
28 label_list.append(feature_label_tuple[1])
29 convert_numpy_to_recordio(
30 output_dir,
31 np.array(feature_list),
32 np.array(label_list),
33 feature_label_columns,
34 records_per_file,
35 codec_type,
36 str(ctx.partitionId()),
37 )
38 return file_list
39 return _process_data
40
41
42 def main():
43 parser = argparse.ArgumentParser(
44 description="Spark job to convert training data to RecordIO format"
45 )
46 parser.add_argument(
47 "--training_data_dir",
48 help="Directory that contains training data and will be traversed \
49 recursively",
50 required=True,
51 )
52 parser.add_argument(
53 "--output_dir",
54 help="Directory of output RecordIO data",
55 required=True,
56 )
57 parser.add_argument(
58 "--model_file",
59 required=True,
60 help="User-defined model file which data processing logic is in",
61 )
62 parser.add_argument(
63 "--records_per_file", default=1024, type=int, help="Record per file"
64 )
65 parser.add_argument(
66 "--codec_type",
67 default="tf_example",
68 choices=["tf_example", "bytes"],
69 help="Type of codec(tf_example or bytes)",
70 )
71 parser.add_argument(
72 "--num_workers",
73 default=2,
74 type=int,
75 help="Number of workers of Spark job",
76 )
77
78 args = parser.parse_args()
79
80 # Get training data files recursively from training_data_dir
81 file_list = []
82 for dir_name, subdir_list, files in os.walk(args.training_data_dir):
83 for fname in files:
84 if not fname.startswith('.'):
85 file_list.append(os.path.join(dir_name, fname))
86
87 # Load user-defined model
88 model_module = load_user_model(args.model_file)
89
90 if not os.path.exists(args.output_dir):
91 os.makedirs(args.output_dir)
92
93 # Start the Spark job
94 sc = SparkContext()
95 rdd = sc.parallelize(file_list, args.num_workers)
96 rdd.mapPartitions(
97 process_data(
98 model_module.feature_columns() + model_module.label_columns(),
99 model_module.prepare_data_for_a_single_file,
100 args.output_dir,
101 args.records_per_file,
102 args.codec_type,
103 )
104 ).collect()
105
106
107 if __name__ == "__main__":
108 main()
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/data/codec/__init__.py b/elasticdl/python/data/codec/__init__.py
--- a/elasticdl/python/data/codec/__init__.py
+++ b/elasticdl/python/data/codec/__init__.py
@@ -1,4 +1,4 @@
from elasticdl.python.data.codec.bytes_codec import BytesCodec
from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec
-__all__ = [BytesCodec, TFExampleCodec]
+__all__ = ["BytesCodec", "TFExampleCodec"]
diff --git a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
--- a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
+++ b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
@@ -6,8 +6,9 @@
import numpy as np
from elasticdl.python.elasticdl.common.model_helper import load_user_model
-from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \
- convert_numpy_to_recordio
+from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import (
+ convert_numpy_to_recordio,
+)
def process_data(
@@ -36,6 +37,7 @@
str(ctx.partitionId()),
)
return file_list
+
return _process_data
@@ -50,9 +52,7 @@
required=True,
)
parser.add_argument(
- "--output_dir",
- help="Directory of output RecordIO data",
- required=True,
+ "--output_dir", help="Directory of output RecordIO data", required=True
)
parser.add_argument(
"--model_file",
@@ -81,12 +81,12 @@
file_list = []
for dir_name, subdir_list, files in os.walk(args.training_data_dir):
for fname in files:
- if not fname.startswith('.'):
+ if not fname.startswith("."):
file_list.append(os.path.join(dir_name, fname))
# Load user-defined model
model_module = load_user_model(args.model_file)
-
+
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
| {"golden_diff": "diff --git a/elasticdl/python/data/codec/__init__.py b/elasticdl/python/data/codec/__init__.py\n--- a/elasticdl/python/data/codec/__init__.py\n+++ b/elasticdl/python/data/codec/__init__.py\n@@ -1,4 +1,4 @@\n from elasticdl.python.data.codec.bytes_codec import BytesCodec\n from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec\n \n-__all__ = [BytesCodec, TFExampleCodec]\n+__all__ = [\"BytesCodec\", \"TFExampleCodec\"]\ndiff --git a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n--- a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n+++ b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n@@ -6,8 +6,9 @@\n \n import numpy as np\n from elasticdl.python.elasticdl.common.model_helper import load_user_model\n-from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \\\n- convert_numpy_to_recordio\n+from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import (\n+ convert_numpy_to_recordio,\n+)\n \n \n def process_data(\n@@ -36,6 +37,7 @@\n str(ctx.partitionId()),\n )\n return file_list\n+\n return _process_data\n \n \n@@ -50,9 +52,7 @@\n required=True,\n )\n parser.add_argument(\n- \"--output_dir\",\n- help=\"Directory of output RecordIO data\",\n- required=True,\n+ \"--output_dir\", help=\"Directory of output RecordIO data\", required=True\n )\n parser.add_argument(\n \"--model_file\",\n@@ -81,12 +81,12 @@\n file_list = []\n for dir_name, subdir_list, files in os.walk(args.training_data_dir):\n for fname in files:\n- if not fname.startswith('.'):\n+ if not fname.startswith(\".\"):\n file_list.append(os.path.join(dir_name, fname))\n \n # Load user-defined model\n model_module = load_user_model(args.model_file)\n- \n+\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n", "issue": "precommit flake8 check is missing files\nelasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py is missed by the checker.\n", "before_files": [{"content": "from elasticdl.python.data.codec.bytes_codec import BytesCodec\nfrom elasticdl.python.data.codec.tf_example_codec import TFExampleCodec\n\n__all__ = [BytesCodec, TFExampleCodec]\n", "path": "elasticdl/python/data/codec/__init__.py"}, {"content": "import argparse\nimport os\n\nfrom pyspark import SparkContext\nfrom pyspark import TaskContext\n\nimport numpy as np\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model\nfrom elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \\\n convert_numpy_to_recordio\n\n\ndef process_data(\n feature_label_columns,\n single_file_preparation_func,\n output_dir,\n records_per_file,\n codec_type,\n):\n def _process_data(file_list):\n ctx = TaskContext()\n feature_list = []\n label_list = []\n for file in file_list:\n feature_label_tuple = single_file_preparation_func(file)\n assert len(feature_label_tuple) == 2\n feature_list.append(feature_label_tuple[0])\n label_list.append(feature_label_tuple[1])\n convert_numpy_to_recordio(\n output_dir,\n np.array(feature_list),\n np.array(label_list),\n feature_label_columns,\n records_per_file,\n codec_type,\n str(ctx.partitionId()),\n )\n return file_list\n return _process_data\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Spark job to convert training data to RecordIO format\"\n )\n parser.add_argument(\n 
\"--training_data_dir\",\n help=\"Directory that contains training data and will be traversed \\\n recursively\",\n required=True,\n )\n parser.add_argument(\n \"--output_dir\",\n help=\"Directory of output RecordIO data\",\n required=True,\n )\n parser.add_argument(\n \"--model_file\",\n required=True,\n help=\"User-defined model file which data processing logic is in\",\n )\n parser.add_argument(\n \"--records_per_file\", default=1024, type=int, help=\"Record per file\"\n )\n parser.add_argument(\n \"--codec_type\",\n default=\"tf_example\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\n \"--num_workers\",\n default=2,\n type=int,\n help=\"Number of workers of Spark job\",\n )\n\n args = parser.parse_args()\n\n # Get training data files recursively from training_data_dir\n file_list = []\n for dir_name, subdir_list, files in os.walk(args.training_data_dir):\n for fname in files:\n if not fname.startswith('.'):\n file_list.append(os.path.join(dir_name, fname))\n\n # Load user-defined model\n model_module = load_user_model(args.model_file)\n \n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Start the Spark job\n sc = SparkContext()\n rdd = sc.parallelize(file_list, args.num_workers)\n rdd.mapPartitions(\n process_data(\n model_module.feature_columns() + model_module.label_columns(),\n model_module.prepare_data_for_a_single_file,\n args.output_dir,\n args.records_per_file,\n args.codec_type,\n )\n ).collect()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py"}], "after_files": [{"content": "from elasticdl.python.data.codec.bytes_codec import BytesCodec\nfrom elasticdl.python.data.codec.tf_example_codec import TFExampleCodec\n\n__all__ = [\"BytesCodec\", \"TFExampleCodec\"]\n", "path": "elasticdl/python/data/codec/__init__.py"}, {"content": "import argparse\nimport os\n\nfrom pyspark import SparkContext\nfrom pyspark import TaskContext\n\nimport numpy as np\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model\nfrom elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import (\n convert_numpy_to_recordio,\n)\n\n\ndef process_data(\n feature_label_columns,\n single_file_preparation_func,\n output_dir,\n records_per_file,\n codec_type,\n):\n def _process_data(file_list):\n ctx = TaskContext()\n feature_list = []\n label_list = []\n for file in file_list:\n feature_label_tuple = single_file_preparation_func(file)\n assert len(feature_label_tuple) == 2\n feature_list.append(feature_label_tuple[0])\n label_list.append(feature_label_tuple[1])\n convert_numpy_to_recordio(\n output_dir,\n np.array(feature_list),\n np.array(label_list),\n feature_label_columns,\n records_per_file,\n codec_type,\n str(ctx.partitionId()),\n )\n return file_list\n\n return _process_data\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Spark job to convert training data to RecordIO format\"\n )\n parser.add_argument(\n \"--training_data_dir\",\n help=\"Directory that contains training data and will be traversed \\\n recursively\",\n required=True,\n )\n parser.add_argument(\n \"--output_dir\", help=\"Directory of output RecordIO data\", required=True\n )\n parser.add_argument(\n \"--model_file\",\n required=True,\n help=\"User-defined model file which data processing logic is in\",\n )\n parser.add_argument(\n \"--records_per_file\", default=1024, type=int, help=\"Record per file\"\n 
)\n parser.add_argument(\n \"--codec_type\",\n default=\"tf_example\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\n \"--num_workers\",\n default=2,\n type=int,\n help=\"Number of workers of Spark job\",\n )\n\n args = parser.parse_args()\n\n # Get training data files recursively from training_data_dir\n file_list = []\n for dir_name, subdir_list, files in os.walk(args.training_data_dir):\n for fname in files:\n if not fname.startswith(\".\"):\n file_list.append(os.path.join(dir_name, fname))\n\n # Load user-defined model\n model_module = load_user_model(args.model_file)\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Start the Spark job\n sc = SparkContext()\n rdd = sc.parallelize(file_list, args.num_workers)\n rdd.mapPartitions(\n process_data(\n model_module.feature_columns() + model_module.label_columns(),\n model_module.prepare_data_for_a_single_file,\n args.output_dir,\n args.records_per_file,\n args.codec_type,\n )\n ).collect()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py"}]} | 1,238 | 521 |
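For reference, the `__init__.py` part of the fix above shown in isolation; note that `__all__` should list export *names* as strings, which is also what flake8 expects when it uses `__all__` to justify otherwise-unused imports:

```python
from elasticdl.python.data.codec.bytes_codec import BytesCodec
from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec

# Strings (names), not the class objects themselves.
__all__ = ["BytesCodec", "TFExampleCodec"]
```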
gh_patches_debug_16462 | rasdani/github-patches | git_diff | napari__napari-6268 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG `changing` event in `EventedDict` not emitted
## 🐛 Bug
The `changing` event in [`EventedDict`](https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L9) is created but not documented or emitted:
https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L75-L77
It seems not to be missed, should it be added?
<!-- A clear and concise description of what the bug is. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/utils/events/containers/_evented_dict.py`
Content:
```
1 """MutableMapping that emits events when altered."""
2 from typing import Mapping, Optional, Sequence, Type, Union
3
4 from napari.utils.events.containers._dict import _K, _T, TypedMutableMapping
5 from napari.utils.events.event import EmitterGroup, Event
6 from napari.utils.events.types import SupportsEvents
7
8
9 class EventedDict(TypedMutableMapping[_K, _T]):
10 """Mutable dictionary that emits events when altered.
11
12 This class is designed to behave exactly like builtin ``dict``, but
13 will emit events before and after all mutations (addition, removal, and
14 changing).
15
16 Parameters
17 ----------
18 data : Mapping, optional
19 Dictionary to initialize the class with.
20 basetype : type of sequence of types, optional
21 Type of the element in the dictionary.
22
23 Events
24 ------
25 changed (key: K, old_value: T, value: T)
26 emitted when item at ``key`` is changed from ``old_value`` to ``value``
27 adding (key: K)
28 emitted before an item is added to the dictionary with ``key``
29 added (key: K, value: T)
30 emitted after ``value`` was added to the dictionary with ``key``
31 removing (key: K)
32 emitted before ``key`` is removed from the dictionary
33 removed (key: K, value: T)
34 emitted after ``key`` was removed from the dictionary
35 updated (key, K, value: T)
36 emitted after ``value`` of ``key`` was changed. Only implemented by
37 subclasses to give them an option to trigger some update after ``value``
38 was changed and this class did not register it. This can be useful if
39 the ``basetype`` is not an evented object.
40 """
41
42 events: EmitterGroup
43
44 def __init__(
45 self,
46 data: Optional[Mapping[_K, _T]] = None,
47 basetype: Union[Type[_T], Sequence[Type[_T]]] = (),
48 ) -> None:
49 _events = {
50 "changing": None,
51 "changed": None,
52 "adding": None,
53 "added": None,
54 "removing": None,
55 "removed": None,
56 "updated": None,
57 }
58 # For inheritance: If the mro already provides an EmitterGroup, add...
59 if hasattr(self, "events") and isinstance(self.events, EmitterGroup):
60 self.events.add(**_events)
61 else:
62 # otherwise create a new one
63 self.events = EmitterGroup(
64 source=self, auto_connect=False, **_events
65 )
66 super().__init__(data, basetype)
67
68 def __setitem__(self, key: _K, value: _T):
69 old = self._dict.get(key)
70 if value is old or value == old:
71 return
72 if old is None:
73 self.events.adding(key=key)
74 super().__setitem__(key, value)
75 self.events.added(key=key, value=value)
76 self._connect_child_emitters(value)
77 else:
78 super().__setitem__(key, value)
79 self.events.changed(key=key, old_value=old, value=value)
80
81 def __delitem__(self, key: _K):
82 self.events.removing(key=key)
83 self._disconnect_child_emitters(self[key])
84 item = self._dict.pop(key)
85 self.events.removed(key=key, value=item)
86
87 def _reemit_child_event(self, event: Event):
88 """An item in the dict emitted an event. Re-emit with key"""
89 if not hasattr(event, "key"):
90 event.key = self.key(event.source)
91
92 # re-emit with this object's EventEmitter
93 self.events(event)
94
95 def _disconnect_child_emitters(self, child: _T):
96 """Disconnect all events from the child from the re-emitter."""
97 if isinstance(child, SupportsEvents):
98 child.events.disconnect(self._reemit_child_event)
99
100 def _connect_child_emitters(self, child: _T):
101 """Connect all events from the child to be re-emitted."""
102 if isinstance(child, SupportsEvents):
103 # make sure the event source has been set on the child
104 if child.events.source is None:
105 child.events.source = child
106 child.events.connect(self._reemit_child_event)
107
108 def key(self, value: _T):
109 """Return first instance of value."""
110 for k, v in self._dict.items():
111 if v is value or v == value:
112 return k
113 return None
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/utils/events/containers/_evented_dict.py b/napari/utils/events/containers/_evented_dict.py
--- a/napari/utils/events/containers/_evented_dict.py
+++ b/napari/utils/events/containers/_evented_dict.py
@@ -22,6 +22,8 @@
Events
------
+ changing (key: K)
+ emitted before an item at ``key`` is changed
changed (key: K, old_value: T, value: T)
emitted when item at ``key`` is changed from ``old_value`` to ``value``
adding (key: K)
@@ -75,6 +77,7 @@
self.events.added(key=key, value=value)
self._connect_child_emitters(value)
else:
+ self.events.changing(key=key)
super().__setitem__(key, value)
self.events.changed(key=key, old_value=old, value=value)
| {"golden_diff": "diff --git a/napari/utils/events/containers/_evented_dict.py b/napari/utils/events/containers/_evented_dict.py\n--- a/napari/utils/events/containers/_evented_dict.py\n+++ b/napari/utils/events/containers/_evented_dict.py\n@@ -22,6 +22,8 @@\n \n Events\n ------\n+ changing (key: K)\n+ emitted before an item at ``key`` is changed\n changed (key: K, old_value: T, value: T)\n emitted when item at ``key`` is changed from ``old_value`` to ``value``\n adding (key: K)\n@@ -75,6 +77,7 @@\n self.events.added(key=key, value=value)\n self._connect_child_emitters(value)\n else:\n+ self.events.changing(key=key)\n super().__setitem__(key, value)\n self.events.changed(key=key, old_value=old, value=value)\n", "issue": "BUG `changing` event in `EventedDict` not emitted\n## \ud83d\udc1b Bug\r\n\r\nThe `changing` event in [`EventedDict`](https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L9) is created but not documented or emitted:\r\n\r\nhttps://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L75-L77\r\n\r\nIt seems not to be missed, should it be added?\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n\n", "before_files": [{"content": "\"\"\"MutableMapping that emits events when altered.\"\"\"\nfrom typing import Mapping, Optional, Sequence, Type, Union\n\nfrom napari.utils.events.containers._dict import _K, _T, TypedMutableMapping\nfrom napari.utils.events.event import EmitterGroup, Event\nfrom napari.utils.events.types import SupportsEvents\n\n\nclass EventedDict(TypedMutableMapping[_K, _T]):\n \"\"\"Mutable dictionary that emits events when altered.\n\n This class is designed to behave exactly like builtin ``dict``, but\n will emit events before and after all mutations (addition, removal, and\n changing).\n\n Parameters\n ----------\n data : Mapping, optional\n Dictionary to initialize the class with.\n basetype : type of sequence of types, optional\n Type of the element in the dictionary.\n\n Events\n ------\n changed (key: K, old_value: T, value: T)\n emitted when item at ``key`` is changed from ``old_value`` to ``value``\n adding (key: K)\n emitted before an item is added to the dictionary with ``key``\n added (key: K, value: T)\n emitted after ``value`` was added to the dictionary with ``key``\n removing (key: K)\n emitted before ``key`` is removed from the dictionary\n removed (key: K, value: T)\n emitted after ``key`` was removed from the dictionary\n updated (key, K, value: T)\n emitted after ``value`` of ``key`` was changed. Only implemented by\n subclasses to give them an option to trigger some update after ``value``\n was changed and this class did not register it. 
This can be useful if\n the ``basetype`` is not an evented object.\n \"\"\"\n\n events: EmitterGroup\n\n def __init__(\n self,\n data: Optional[Mapping[_K, _T]] = None,\n basetype: Union[Type[_T], Sequence[Type[_T]]] = (),\n ) -> None:\n _events = {\n \"changing\": None,\n \"changed\": None,\n \"adding\": None,\n \"added\": None,\n \"removing\": None,\n \"removed\": None,\n \"updated\": None,\n }\n # For inheritance: If the mro already provides an EmitterGroup, add...\n if hasattr(self, \"events\") and isinstance(self.events, EmitterGroup):\n self.events.add(**_events)\n else:\n # otherwise create a new one\n self.events = EmitterGroup(\n source=self, auto_connect=False, **_events\n )\n super().__init__(data, basetype)\n\n def __setitem__(self, key: _K, value: _T):\n old = self._dict.get(key)\n if value is old or value == old:\n return\n if old is None:\n self.events.adding(key=key)\n super().__setitem__(key, value)\n self.events.added(key=key, value=value)\n self._connect_child_emitters(value)\n else:\n super().__setitem__(key, value)\n self.events.changed(key=key, old_value=old, value=value)\n\n def __delitem__(self, key: _K):\n self.events.removing(key=key)\n self._disconnect_child_emitters(self[key])\n item = self._dict.pop(key)\n self.events.removed(key=key, value=item)\n\n def _reemit_child_event(self, event: Event):\n \"\"\"An item in the dict emitted an event. Re-emit with key\"\"\"\n if not hasattr(event, \"key\"):\n event.key = self.key(event.source)\n\n # re-emit with this object's EventEmitter\n self.events(event)\n\n def _disconnect_child_emitters(self, child: _T):\n \"\"\"Disconnect all events from the child from the re-emitter.\"\"\"\n if isinstance(child, SupportsEvents):\n child.events.disconnect(self._reemit_child_event)\n\n def _connect_child_emitters(self, child: _T):\n \"\"\"Connect all events from the child to be re-emitted.\"\"\"\n if isinstance(child, SupportsEvents):\n # make sure the event source has been set on the child\n if child.events.source is None:\n child.events.source = child\n child.events.connect(self._reemit_child_event)\n\n def key(self, value: _T):\n \"\"\"Return first instance of value.\"\"\"\n for k, v in self._dict.items():\n if v is value or v == value:\n return k\n return None\n", "path": "napari/utils/events/containers/_evented_dict.py"}], "after_files": [{"content": "\"\"\"MutableMapping that emits events when altered.\"\"\"\nfrom typing import Mapping, Optional, Sequence, Type, Union\n\nfrom napari.utils.events.containers._dict import _K, _T, TypedMutableMapping\nfrom napari.utils.events.event import EmitterGroup, Event\nfrom napari.utils.events.types import SupportsEvents\n\n\nclass EventedDict(TypedMutableMapping[_K, _T]):\n \"\"\"Mutable dictionary that emits events when altered.\n\n This class is designed to behave exactly like builtin ``dict``, but\n will emit events before and after all mutations (addition, removal, and\n changing).\n\n Parameters\n ----------\n data : Mapping, optional\n Dictionary to initialize the class with.\n basetype : type of sequence of types, optional\n Type of the element in the dictionary.\n\n Events\n ------\n changing (key: K)\n emitted before an item at ``key`` is changed\n changed (key: K, old_value: T, value: T)\n emitted when item at ``key`` is changed from ``old_value`` to ``value``\n adding (key: K)\n emitted before an item is added to the dictionary with ``key``\n added (key: K, value: T)\n emitted after ``value`` was added to the dictionary with ``key``\n removing (key: K)\n emitted before ``key`` 
is removed from the dictionary\n removed (key: K, value: T)\n emitted after ``key`` was removed from the dictionary\n updated (key, K, value: T)\n emitted after ``value`` of ``key`` was changed. Only implemented by\n subclasses to give them an option to trigger some update after ``value``\n was changed and this class did not register it. This can be useful if\n the ``basetype`` is not an evented object.\n \"\"\"\n\n events: EmitterGroup\n\n def __init__(\n self,\n data: Optional[Mapping[_K, _T]] = None,\n basetype: Union[Type[_T], Sequence[Type[_T]]] = (),\n ) -> None:\n _events = {\n \"changing\": None,\n \"changed\": None,\n \"adding\": None,\n \"added\": None,\n \"removing\": None,\n \"removed\": None,\n \"updated\": None,\n }\n # For inheritance: If the mro already provides an EmitterGroup, add...\n if hasattr(self, \"events\") and isinstance(self.events, EmitterGroup):\n self.events.add(**_events)\n else:\n # otherwise create a new one\n self.events = EmitterGroup(\n source=self, auto_connect=False, **_events\n )\n super().__init__(data, basetype)\n\n def __setitem__(self, key: _K, value: _T):\n old = self._dict.get(key)\n if value is old or value == old:\n return\n if old is None:\n self.events.adding(key=key)\n super().__setitem__(key, value)\n self.events.added(key=key, value=value)\n self._connect_child_emitters(value)\n else:\n self.events.changing(key=key)\n super().__setitem__(key, value)\n self.events.changed(key=key, old_value=old, value=value)\n\n def __delitem__(self, key: _K):\n self.events.removing(key=key)\n self._disconnect_child_emitters(self[key])\n item = self._dict.pop(key)\n self.events.removed(key=key, value=item)\n\n def _reemit_child_event(self, event: Event):\n \"\"\"An item in the dict emitted an event. Re-emit with key\"\"\"\n if not hasattr(event, \"key\"):\n event.key = self.key(event.source)\n\n # re-emit with this object's EventEmitter\n self.events(event)\n\n def _disconnect_child_emitters(self, child: _T):\n \"\"\"Disconnect all events from the child from the re-emitter.\"\"\"\n if isinstance(child, SupportsEvents):\n child.events.disconnect(self._reemit_child_event)\n\n def _connect_child_emitters(self, child: _T):\n \"\"\"Connect all events from the child to be re-emitted.\"\"\"\n if isinstance(child, SupportsEvents):\n # make sure the event source has been set on the child\n if child.events.source is None:\n child.events.source = child\n child.events.connect(self._reemit_child_event)\n\n def key(self, value: _T):\n \"\"\"Return first instance of value.\"\"\"\n for k, v in self._dict.items():\n if v is value or v == value:\n return k\n return None\n", "path": "napari/utils/events/containers/_evented_dict.py"}]} | 1,672 | 211 |
gh_patches_debug_40369 | rasdani/github-patches | git_diff | python-discord__bot-1514 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Only punish users once to prevent a race condition in AntiSpam
If a user manages to take part in multiple filter violations, the cog will attempt to `punish()` them for each of those violations. While `punish()` does check for the muted role before applying the infraction, the asynchronous nature of the code can mean a second `punish()` is called and passes the role check before the first `punish()` has gotten a chance to apply the mute. This would result in an attempt to apply a second mute infraction, which will ultimately fail and display a message stating the user already has such an infraction.
I believe the fix is to keep track of which members were already punished so that they can be skipped when processing members for the rule violation. One has to consider this globally, not just for the current deletion context. Maybe using locks for each member would just be easier.
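
To make the per-member lock idea concrete, a minimal sketch (using a plain `asyncio.Lock` per member id rather than any existing helper in this repo; `already_muted` and `apply_mute` are placeholder callables):

```python
import asyncio
from collections import defaultdict

# One asyncio.Lock per member id: concurrent punish() calls for the same
# member are serialized, so the role check only runs after any in-flight
# mute has finished applying.
_member_locks = defaultdict(asyncio.Lock)

async def punish_once(member, already_muted, apply_mute):
    async with _member_locks[member.id]:
        if already_muted(member):
            return
        await apply_mute(member)
```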
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/exts/filters/antispam.py`
Content:
```
1 import asyncio
2 import logging
3 from collections.abc import Mapping
4 from dataclasses import dataclass, field
5 from datetime import datetime, timedelta
6 from operator import itemgetter
7 from typing import Dict, Iterable, List, Set
8
9 from discord import Colour, Member, Message, NotFound, Object, TextChannel
10 from discord.ext.commands import Cog
11
12 from bot import rules
13 from bot.bot import Bot
14 from bot.constants import (
15 AntiSpam as AntiSpamConfig, Channels,
16 Colours, DEBUG_MODE, Event, Filter,
17 Guild as GuildConfig, Icons,
18 )
19 from bot.converters import Duration
20 from bot.exts.moderation.modlog import ModLog
21 from bot.utils.messages import format_user, send_attachments
22
23
24 log = logging.getLogger(__name__)
25
26 RULE_FUNCTION_MAPPING = {
27 'attachments': rules.apply_attachments,
28 'burst': rules.apply_burst,
29 # burst shared is temporarily disabled due to a bug
30 # 'burst_shared': rules.apply_burst_shared,
31 'chars': rules.apply_chars,
32 'discord_emojis': rules.apply_discord_emojis,
33 'duplicates': rules.apply_duplicates,
34 'links': rules.apply_links,
35 'mentions': rules.apply_mentions,
36 'newlines': rules.apply_newlines,
37 'role_mentions': rules.apply_role_mentions,
38 }
39
40
41 @dataclass
42 class DeletionContext:
43 """Represents a Deletion Context for a single spam event."""
44
45 channel: TextChannel
46 members: Dict[int, Member] = field(default_factory=dict)
47 rules: Set[str] = field(default_factory=set)
48 messages: Dict[int, Message] = field(default_factory=dict)
49 attachments: List[List[str]] = field(default_factory=list)
50
51 async def add(self, rule_name: str, members: Iterable[Member], messages: Iterable[Message]) -> None:
52 """Adds new rule violation events to the deletion context."""
53 self.rules.add(rule_name)
54
55 for member in members:
56 if member.id not in self.members:
57 self.members[member.id] = member
58
59 for message in messages:
60 if message.id not in self.messages:
61 self.messages[message.id] = message
62
63 # Re-upload attachments
64 destination = message.guild.get_channel(Channels.attachment_log)
65 urls = await send_attachments(message, destination, link_large=False)
66 self.attachments.append(urls)
67
68 async def upload_messages(self, actor_id: int, modlog: ModLog) -> None:
69 """Method that takes care of uploading the queue and posting modlog alert."""
70 triggered_by_users = ", ".join(format_user(m) for m in self.members.values())
71
72 mod_alert_message = (
73 f"**Triggered by:** {triggered_by_users}\n"
74 f"**Channel:** {self.channel.mention}\n"
75 f"**Rules:** {', '.join(rule for rule in self.rules)}\n"
76 )
77
78 # For multiple messages or those with excessive newlines, use the logs API
79 if len(self.messages) > 1 or 'newlines' in self.rules:
80 url = await modlog.upload_log(self.messages.values(), actor_id, self.attachments)
81 mod_alert_message += f"A complete log of the offending messages can be found [here]({url})"
82 else:
83 mod_alert_message += "Message:\n"
84 [message] = self.messages.values()
85 content = message.clean_content
86 remaining_chars = 2040 - len(mod_alert_message)
87
88 if len(content) > remaining_chars:
89 content = content[:remaining_chars] + "..."
90
91 mod_alert_message += f"{content}"
92
93 *_, last_message = self.messages.values()
94 await modlog.send_log_message(
95 icon_url=Icons.filtering,
96 colour=Colour(Colours.soft_red),
97 title="Spam detected!",
98 text=mod_alert_message,
99 thumbnail=last_message.author.avatar_url_as(static_format="png"),
100 channel_id=Channels.mod_alerts,
101 ping_everyone=AntiSpamConfig.ping_everyone
102 )
103
104
105 class AntiSpam(Cog):
106 """Cog that controls our anti-spam measures."""
107
108 def __init__(self, bot: Bot, validation_errors: Dict[str, str]) -> None:
109 self.bot = bot
110 self.validation_errors = validation_errors
111 role_id = AntiSpamConfig.punishment['role_id']
112 self.muted_role = Object(role_id)
113 self.expiration_date_converter = Duration()
114
115 self.message_deletion_queue = dict()
116
117 self.bot.loop.create_task(self.alert_on_validation_error())
118
119 @property
120 def mod_log(self) -> ModLog:
121 """Allows for easy access of the ModLog cog."""
122 return self.bot.get_cog("ModLog")
123
124 async def alert_on_validation_error(self) -> None:
125 """Unloads the cog and alerts admins if configuration validation failed."""
126 await self.bot.wait_until_guild_available()
127 if self.validation_errors:
128 body = "**The following errors were encountered:**\n"
129 body += "\n".join(f"- {error}" for error in self.validation_errors.values())
130 body += "\n\n**The cog has been unloaded.**"
131
132 await self.mod_log.send_log_message(
133 title="Error: AntiSpam configuration validation failed!",
134 text=body,
135 ping_everyone=True,
136 icon_url=Icons.token_removed,
137 colour=Colour.red()
138 )
139
140 self.bot.remove_cog(self.__class__.__name__)
141 return
142
143 @Cog.listener()
144 async def on_message(self, message: Message) -> None:
145 """Applies the antispam rules to each received message."""
146 if (
147 not message.guild
148 or message.guild.id != GuildConfig.id
149 or message.author.bot
150 or (message.channel.id in Filter.channel_whitelist and not DEBUG_MODE)
151 or (any(role.id in Filter.role_whitelist for role in message.author.roles) and not DEBUG_MODE)
152 ):
153 return
154
155 # Fetch the rule configuration with the highest rule interval.
156 max_interval_config = max(
157 AntiSpamConfig.rules.values(),
158 key=itemgetter('interval')
159 )
160 max_interval = max_interval_config['interval']
161
162 # Store history messages since `interval` seconds ago in a list to prevent unnecessary API calls.
163 earliest_relevant_at = datetime.utcnow() - timedelta(seconds=max_interval)
164 relevant_messages = [
165 msg async for msg in message.channel.history(after=earliest_relevant_at, oldest_first=False)
166 if not msg.author.bot
167 ]
168
169 for rule_name in AntiSpamConfig.rules:
170 rule_config = AntiSpamConfig.rules[rule_name]
171 rule_function = RULE_FUNCTION_MAPPING[rule_name]
172
173 # Create a list of messages that were sent in the interval that the rule cares about.
174 latest_interesting_stamp = datetime.utcnow() - timedelta(seconds=rule_config['interval'])
175 messages_for_rule = [
176 msg for msg in relevant_messages if msg.created_at > latest_interesting_stamp
177 ]
178 result = await rule_function(message, messages_for_rule, rule_config)
179
180 # If the rule returns `None`, that means the message didn't violate it.
181 # If it doesn't, it returns a tuple in the form `(str, Iterable[discord.Member])`
182 # which contains the reason for why the message violated the rule and
183 # an iterable of all members that violated the rule.
184 if result is not None:
185 self.bot.stats.incr(f"mod_alerts.{rule_name}")
186 reason, members, relevant_messages = result
187 full_reason = f"`{rule_name}` rule: {reason}"
188
189 # If there's no spam event going on for this channel, start a new Message Deletion Context
190 channel = message.channel
191 if channel.id not in self.message_deletion_queue:
192 log.trace(f"Creating queue for channel `{channel.id}`")
193 self.message_deletion_queue[message.channel.id] = DeletionContext(channel)
194 self.bot.loop.create_task(self._process_deletion_context(message.channel.id))
195
196 # Add the relevant of this trigger to the Deletion Context
197 await self.message_deletion_queue[message.channel.id].add(
198 rule_name=rule_name,
199 members=members,
200 messages=relevant_messages
201 )
202
203 for member in members:
204
205 # Fire it off as a background task to ensure
206 # that the sleep doesn't block further tasks
207 self.bot.loop.create_task(
208 self.punish(message, member, full_reason)
209 )
210
211 await self.maybe_delete_messages(channel, relevant_messages)
212 break
213
214 async def punish(self, msg: Message, member: Member, reason: str) -> None:
215 """Punishes the given member for triggering an antispam rule."""
216 if not any(role.id == self.muted_role.id for role in member.roles):
217 remove_role_after = AntiSpamConfig.punishment['remove_after']
218
219 # Get context and make sure the bot becomes the actor of infraction by patching the `author` attributes
220 context = await self.bot.get_context(msg)
221 context.author = self.bot.user
222
223 # Since we're going to invoke the tempmute command directly, we need to manually call the converter.
224 dt_remove_role_after = await self.expiration_date_converter.convert(context, f"{remove_role_after}S")
225 await context.invoke(
226 self.bot.get_command('tempmute'),
227 member,
228 dt_remove_role_after,
229 reason=reason
230 )
231
232 async def maybe_delete_messages(self, channel: TextChannel, messages: List[Message]) -> None:
233 """Cleans the messages if cleaning is configured."""
234 if AntiSpamConfig.clean_offending:
235 # If we have more than one message, we can use bulk delete.
236 if len(messages) > 1:
237 message_ids = [message.id for message in messages]
238 self.mod_log.ignore(Event.message_delete, *message_ids)
239 await channel.delete_messages(messages)
240
241 # Otherwise, the bulk delete endpoint will throw up.
242 # Delete the message directly instead.
243 else:
244 self.mod_log.ignore(Event.message_delete, messages[0].id)
245 try:
246 await messages[0].delete()
247 except NotFound:
248 log.info(f"Tried to delete message `{messages[0].id}`, but message could not be found.")
249
250 async def _process_deletion_context(self, context_id: int) -> None:
251 """Processes the Deletion Context queue."""
252 log.trace("Sleeping before processing message deletion queue.")
253 await asyncio.sleep(10)
254
255 if context_id not in self.message_deletion_queue:
256 log.error(f"Started processing deletion queue for context `{context_id}`, but it was not found!")
257 return
258
259 deletion_context = self.message_deletion_queue.pop(context_id)
260 await deletion_context.upload_messages(self.bot.user.id, self.mod_log)
261
262
263 def validate_config(rules_: Mapping = AntiSpamConfig.rules) -> Dict[str, str]:
264 """Validates the antispam configs."""
265 validation_errors = {}
266 for name, config in rules_.items():
267 if name not in RULE_FUNCTION_MAPPING:
268 log.error(
269 f"Unrecognized antispam rule `{name}`. "
270 f"Valid rules are: {', '.join(RULE_FUNCTION_MAPPING)}"
271 )
272 validation_errors[name] = f"`{name}` is not recognized as an antispam rule."
273 continue
274 for required_key in ('interval', 'max'):
275 if required_key not in config:
276 log.error(
277 f"`{required_key}` is required but was not "
278 f"set in rule `{name}`'s configuration."
279 )
280 validation_errors[name] = f"Key `{required_key}` is required but not set for rule `{name}`"
281 return validation_errors
282
283
284 def setup(bot: Bot) -> None:
285 """Validate the AntiSpam configs and load the AntiSpam cog."""
286 validation_errors = validate_config()
287 bot.add_cog(AntiSpam(bot, validation_errors))
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/exts/filters/antispam.py b/bot/exts/filters/antispam.py
--- a/bot/exts/filters/antispam.py
+++ b/bot/exts/filters/antispam.py
@@ -3,7 +3,7 @@
from collections.abc import Mapping
from dataclasses import dataclass, field
from datetime import datetime, timedelta
-from operator import itemgetter
+from operator import attrgetter, itemgetter
from typing import Dict, Iterable, List, Set
from discord import Colour, Member, Message, NotFound, Object, TextChannel
@@ -18,6 +18,7 @@
)
from bot.converters import Duration
from bot.exts.moderation.modlog import ModLog
+from bot.utils import lock, scheduling
from bot.utils.messages import format_user, send_attachments
@@ -114,7 +115,7 @@
self.message_deletion_queue = dict()
- self.bot.loop.create_task(self.alert_on_validation_error())
+ self.bot.loop.create_task(self.alert_on_validation_error(), name="AntiSpam.alert_on_validation_error")
@property
def mod_log(self) -> ModLog:
@@ -191,7 +192,10 @@
if channel.id not in self.message_deletion_queue:
log.trace(f"Creating queue for channel `{channel.id}`")
self.message_deletion_queue[message.channel.id] = DeletionContext(channel)
- self.bot.loop.create_task(self._process_deletion_context(message.channel.id))
+ scheduling.create_task(
+ self._process_deletion_context(message.channel.id),
+ name=f"AntiSpam._process_deletion_context({message.channel.id})"
+ )
# Add the relevant of this trigger to the Deletion Context
await self.message_deletion_queue[message.channel.id].add(
@@ -201,16 +205,15 @@
)
for member in members:
-
- # Fire it off as a background task to ensure
- # that the sleep doesn't block further tasks
- self.bot.loop.create_task(
- self.punish(message, member, full_reason)
+ scheduling.create_task(
+ self.punish(message, member, full_reason),
+ name=f"AntiSpam.punish(message={message.id}, member={member.id}, rule={rule_name})"
)
await self.maybe_delete_messages(channel, relevant_messages)
break
+ @lock.lock_arg("antispam.punish", "member", attrgetter("id"))
async def punish(self, msg: Message, member: Member, reason: str) -> None:
"""Punishes the given member for triggering an antispam rule."""
if not any(role.id == self.muted_role.id for role in member.roles):
| {"golden_diff": "diff --git a/bot/exts/filters/antispam.py b/bot/exts/filters/antispam.py\n--- a/bot/exts/filters/antispam.py\n+++ b/bot/exts/filters/antispam.py\n@@ -3,7 +3,7 @@\n from collections.abc import Mapping\n from dataclasses import dataclass, field\n from datetime import datetime, timedelta\n-from operator import itemgetter\n+from operator import attrgetter, itemgetter\n from typing import Dict, Iterable, List, Set\n \n from discord import Colour, Member, Message, NotFound, Object, TextChannel\n@@ -18,6 +18,7 @@\n )\n from bot.converters import Duration\n from bot.exts.moderation.modlog import ModLog\n+from bot.utils import lock, scheduling\n from bot.utils.messages import format_user, send_attachments\n \n \n@@ -114,7 +115,7 @@\n \n self.message_deletion_queue = dict()\n \n- self.bot.loop.create_task(self.alert_on_validation_error())\n+ self.bot.loop.create_task(self.alert_on_validation_error(), name=\"AntiSpam.alert_on_validation_error\")\n \n @property\n def mod_log(self) -> ModLog:\n@@ -191,7 +192,10 @@\n if channel.id not in self.message_deletion_queue:\n log.trace(f\"Creating queue for channel `{channel.id}`\")\n self.message_deletion_queue[message.channel.id] = DeletionContext(channel)\n- self.bot.loop.create_task(self._process_deletion_context(message.channel.id))\n+ scheduling.create_task(\n+ self._process_deletion_context(message.channel.id),\n+ name=f\"AntiSpam._process_deletion_context({message.channel.id})\"\n+ )\n \n # Add the relevant of this trigger to the Deletion Context\n await self.message_deletion_queue[message.channel.id].add(\n@@ -201,16 +205,15 @@\n )\n \n for member in members:\n-\n- # Fire it off as a background task to ensure\n- # that the sleep doesn't block further tasks\n- self.bot.loop.create_task(\n- self.punish(message, member, full_reason)\n+ scheduling.create_task(\n+ self.punish(message, member, full_reason),\n+ name=f\"AntiSpam.punish(message={message.id}, member={member.id}, rule={rule_name})\"\n )\n \n await self.maybe_delete_messages(channel, relevant_messages)\n break\n \n+ @lock.lock_arg(\"antispam.punish\", \"member\", attrgetter(\"id\"))\n async def punish(self, msg: Message, member: Member, reason: str) -> None:\n \"\"\"Punishes the given member for triggering an antispam rule.\"\"\"\n if not any(role.id == self.muted_role.id for role in member.roles):\n", "issue": "Only punish users once to prevent a race condition in AntiSpam\nIf a user manages to take part of multiple filter violations, the cog will attempt to `punish()` them for each of those violations. While `punish()` does check for the muted role before applying the infraction, the asynchronous nature of the code can mean a second `punish()` is called and passes the role check before the first `punish()` has gotten a chance to apply the mute. This would result in an attempt to apply a second mute infraction, which will ultimately fail and display a message stating the user already has such infraction.\r\n\r\nI believe the fix is to keep track of which members were already punished so that they can be skipped when processing members for the rule violation. One has to consider this globally, not just for the current deletion context. 
Maybe using locks for each member would just be easier.\n", "before_files": [{"content": "import asyncio\nimport logging\nfrom collections.abc import Mapping\nfrom dataclasses import dataclass, field\nfrom datetime import datetime, timedelta\nfrom operator import itemgetter\nfrom typing import Dict, Iterable, List, Set\n\nfrom discord import Colour, Member, Message, NotFound, Object, TextChannel\nfrom discord.ext.commands import Cog\n\nfrom bot import rules\nfrom bot.bot import Bot\nfrom bot.constants import (\n AntiSpam as AntiSpamConfig, Channels,\n Colours, DEBUG_MODE, Event, Filter,\n Guild as GuildConfig, Icons,\n)\nfrom bot.converters import Duration\nfrom bot.exts.moderation.modlog import ModLog\nfrom bot.utils.messages import format_user, send_attachments\n\n\nlog = logging.getLogger(__name__)\n\nRULE_FUNCTION_MAPPING = {\n 'attachments': rules.apply_attachments,\n 'burst': rules.apply_burst,\n # burst shared is temporarily disabled due to a bug\n # 'burst_shared': rules.apply_burst_shared,\n 'chars': rules.apply_chars,\n 'discord_emojis': rules.apply_discord_emojis,\n 'duplicates': rules.apply_duplicates,\n 'links': rules.apply_links,\n 'mentions': rules.apply_mentions,\n 'newlines': rules.apply_newlines,\n 'role_mentions': rules.apply_role_mentions,\n}\n\n\n@dataclass\nclass DeletionContext:\n \"\"\"Represents a Deletion Context for a single spam event.\"\"\"\n\n channel: TextChannel\n members: Dict[int, Member] = field(default_factory=dict)\n rules: Set[str] = field(default_factory=set)\n messages: Dict[int, Message] = field(default_factory=dict)\n attachments: List[List[str]] = field(default_factory=list)\n\n async def add(self, rule_name: str, members: Iterable[Member], messages: Iterable[Message]) -> None:\n \"\"\"Adds new rule violation events to the deletion context.\"\"\"\n self.rules.add(rule_name)\n\n for member in members:\n if member.id not in self.members:\n self.members[member.id] = member\n\n for message in messages:\n if message.id not in self.messages:\n self.messages[message.id] = message\n\n # Re-upload attachments\n destination = message.guild.get_channel(Channels.attachment_log)\n urls = await send_attachments(message, destination, link_large=False)\n self.attachments.append(urls)\n\n async def upload_messages(self, actor_id: int, modlog: ModLog) -> None:\n \"\"\"Method that takes care of uploading the queue and posting modlog alert.\"\"\"\n triggered_by_users = \", \".join(format_user(m) for m in self.members.values())\n\n mod_alert_message = (\n f\"**Triggered by:** {triggered_by_users}\\n\"\n f\"**Channel:** {self.channel.mention}\\n\"\n f\"**Rules:** {', '.join(rule for rule in self.rules)}\\n\"\n )\n\n # For multiple messages or those with excessive newlines, use the logs API\n if len(self.messages) > 1 or 'newlines' in self.rules:\n url = await modlog.upload_log(self.messages.values(), actor_id, self.attachments)\n mod_alert_message += f\"A complete log of the offending messages can be found [here]({url})\"\n else:\n mod_alert_message += \"Message:\\n\"\n [message] = self.messages.values()\n content = message.clean_content\n remaining_chars = 2040 - len(mod_alert_message)\n\n if len(content) > remaining_chars:\n content = content[:remaining_chars] + \"...\"\n\n mod_alert_message += f\"{content}\"\n\n *_, last_message = self.messages.values()\n await modlog.send_log_message(\n icon_url=Icons.filtering,\n colour=Colour(Colours.soft_red),\n title=\"Spam detected!\",\n text=mod_alert_message,\n 
thumbnail=last_message.author.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_alerts,\n ping_everyone=AntiSpamConfig.ping_everyone\n )\n\n\nclass AntiSpam(Cog):\n \"\"\"Cog that controls our anti-spam measures.\"\"\"\n\n def __init__(self, bot: Bot, validation_errors: Dict[str, str]) -> None:\n self.bot = bot\n self.validation_errors = validation_errors\n role_id = AntiSpamConfig.punishment['role_id']\n self.muted_role = Object(role_id)\n self.expiration_date_converter = Duration()\n\n self.message_deletion_queue = dict()\n\n self.bot.loop.create_task(self.alert_on_validation_error())\n\n @property\n def mod_log(self) -> ModLog:\n \"\"\"Allows for easy access of the ModLog cog.\"\"\"\n return self.bot.get_cog(\"ModLog\")\n\n async def alert_on_validation_error(self) -> None:\n \"\"\"Unloads the cog and alerts admins if configuration validation failed.\"\"\"\n await self.bot.wait_until_guild_available()\n if self.validation_errors:\n body = \"**The following errors were encountered:**\\n\"\n body += \"\\n\".join(f\"- {error}\" for error in self.validation_errors.values())\n body += \"\\n\\n**The cog has been unloaded.**\"\n\n await self.mod_log.send_log_message(\n title=\"Error: AntiSpam configuration validation failed!\",\n text=body,\n ping_everyone=True,\n icon_url=Icons.token_removed,\n colour=Colour.red()\n )\n\n self.bot.remove_cog(self.__class__.__name__)\n return\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Applies the antispam rules to each received message.\"\"\"\n if (\n not message.guild\n or message.guild.id != GuildConfig.id\n or message.author.bot\n or (message.channel.id in Filter.channel_whitelist and not DEBUG_MODE)\n or (any(role.id in Filter.role_whitelist for role in message.author.roles) and not DEBUG_MODE)\n ):\n return\n\n # Fetch the rule configuration with the highest rule interval.\n max_interval_config = max(\n AntiSpamConfig.rules.values(),\n key=itemgetter('interval')\n )\n max_interval = max_interval_config['interval']\n\n # Store history messages since `interval` seconds ago in a list to prevent unnecessary API calls.\n earliest_relevant_at = datetime.utcnow() - timedelta(seconds=max_interval)\n relevant_messages = [\n msg async for msg in message.channel.history(after=earliest_relevant_at, oldest_first=False)\n if not msg.author.bot\n ]\n\n for rule_name in AntiSpamConfig.rules:\n rule_config = AntiSpamConfig.rules[rule_name]\n rule_function = RULE_FUNCTION_MAPPING[rule_name]\n\n # Create a list of messages that were sent in the interval that the rule cares about.\n latest_interesting_stamp = datetime.utcnow() - timedelta(seconds=rule_config['interval'])\n messages_for_rule = [\n msg for msg in relevant_messages if msg.created_at > latest_interesting_stamp\n ]\n result = await rule_function(message, messages_for_rule, rule_config)\n\n # If the rule returns `None`, that means the message didn't violate it.\n # If it doesn't, it returns a tuple in the form `(str, Iterable[discord.Member])`\n # which contains the reason for why the message violated the rule and\n # an iterable of all members that violated the rule.\n if result is not None:\n self.bot.stats.incr(f\"mod_alerts.{rule_name}\")\n reason, members, relevant_messages = result\n full_reason = f\"`{rule_name}` rule: {reason}\"\n\n # If there's no spam event going on for this channel, start a new Message Deletion Context\n channel = message.channel\n if channel.id not in self.message_deletion_queue:\n log.trace(f\"Creating queue for channel 
`{channel.id}`\")\n self.message_deletion_queue[message.channel.id] = DeletionContext(channel)\n self.bot.loop.create_task(self._process_deletion_context(message.channel.id))\n\n # Add the relevant of this trigger to the Deletion Context\n await self.message_deletion_queue[message.channel.id].add(\n rule_name=rule_name,\n members=members,\n messages=relevant_messages\n )\n\n for member in members:\n\n # Fire it off as a background task to ensure\n # that the sleep doesn't block further tasks\n self.bot.loop.create_task(\n self.punish(message, member, full_reason)\n )\n\n await self.maybe_delete_messages(channel, relevant_messages)\n break\n\n async def punish(self, msg: Message, member: Member, reason: str) -> None:\n \"\"\"Punishes the given member for triggering an antispam rule.\"\"\"\n if not any(role.id == self.muted_role.id for role in member.roles):\n remove_role_after = AntiSpamConfig.punishment['remove_after']\n\n # Get context and make sure the bot becomes the actor of infraction by patching the `author` attributes\n context = await self.bot.get_context(msg)\n context.author = self.bot.user\n\n # Since we're going to invoke the tempmute command directly, we need to manually call the converter.\n dt_remove_role_after = await self.expiration_date_converter.convert(context, f\"{remove_role_after}S\")\n await context.invoke(\n self.bot.get_command('tempmute'),\n member,\n dt_remove_role_after,\n reason=reason\n )\n\n async def maybe_delete_messages(self, channel: TextChannel, messages: List[Message]) -> None:\n \"\"\"Cleans the messages if cleaning is configured.\"\"\"\n if AntiSpamConfig.clean_offending:\n # If we have more than one message, we can use bulk delete.\n if len(messages) > 1:\n message_ids = [message.id for message in messages]\n self.mod_log.ignore(Event.message_delete, *message_ids)\n await channel.delete_messages(messages)\n\n # Otherwise, the bulk delete endpoint will throw up.\n # Delete the message directly instead.\n else:\n self.mod_log.ignore(Event.message_delete, messages[0].id)\n try:\n await messages[0].delete()\n except NotFound:\n log.info(f\"Tried to delete message `{messages[0].id}`, but message could not be found.\")\n\n async def _process_deletion_context(self, context_id: int) -> None:\n \"\"\"Processes the Deletion Context queue.\"\"\"\n log.trace(\"Sleeping before processing message deletion queue.\")\n await asyncio.sleep(10)\n\n if context_id not in self.message_deletion_queue:\n log.error(f\"Started processing deletion queue for context `{context_id}`, but it was not found!\")\n return\n\n deletion_context = self.message_deletion_queue.pop(context_id)\n await deletion_context.upload_messages(self.bot.user.id, self.mod_log)\n\n\ndef validate_config(rules_: Mapping = AntiSpamConfig.rules) -> Dict[str, str]:\n \"\"\"Validates the antispam configs.\"\"\"\n validation_errors = {}\n for name, config in rules_.items():\n if name not in RULE_FUNCTION_MAPPING:\n log.error(\n f\"Unrecognized antispam rule `{name}`. 
\"\n f\"Valid rules are: {', '.join(RULE_FUNCTION_MAPPING)}\"\n )\n validation_errors[name] = f\"`{name}` is not recognized as an antispam rule.\"\n continue\n for required_key in ('interval', 'max'):\n if required_key not in config:\n log.error(\n f\"`{required_key}` is required but was not \"\n f\"set in rule `{name}`'s configuration.\"\n )\n validation_errors[name] = f\"Key `{required_key}` is required but not set for rule `{name}`\"\n return validation_errors\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Validate the AntiSpam configs and load the AntiSpam cog.\"\"\"\n validation_errors = validate_config()\n bot.add_cog(AntiSpam(bot, validation_errors))\n", "path": "bot/exts/filters/antispam.py"}], "after_files": [{"content": "import asyncio\nimport logging\nfrom collections.abc import Mapping\nfrom dataclasses import dataclass, field\nfrom datetime import datetime, timedelta\nfrom operator import attrgetter, itemgetter\nfrom typing import Dict, Iterable, List, Set\n\nfrom discord import Colour, Member, Message, NotFound, Object, TextChannel\nfrom discord.ext.commands import Cog\n\nfrom bot import rules\nfrom bot.bot import Bot\nfrom bot.constants import (\n AntiSpam as AntiSpamConfig, Channels,\n Colours, DEBUG_MODE, Event, Filter,\n Guild as GuildConfig, Icons,\n)\nfrom bot.converters import Duration\nfrom bot.exts.moderation.modlog import ModLog\nfrom bot.utils import lock, scheduling\nfrom bot.utils.messages import format_user, send_attachments\n\n\nlog = logging.getLogger(__name__)\n\nRULE_FUNCTION_MAPPING = {\n 'attachments': rules.apply_attachments,\n 'burst': rules.apply_burst,\n # burst shared is temporarily disabled due to a bug\n # 'burst_shared': rules.apply_burst_shared,\n 'chars': rules.apply_chars,\n 'discord_emojis': rules.apply_discord_emojis,\n 'duplicates': rules.apply_duplicates,\n 'links': rules.apply_links,\n 'mentions': rules.apply_mentions,\n 'newlines': rules.apply_newlines,\n 'role_mentions': rules.apply_role_mentions,\n}\n\n\n@dataclass\nclass DeletionContext:\n \"\"\"Represents a Deletion Context for a single spam event.\"\"\"\n\n channel: TextChannel\n members: Dict[int, Member] = field(default_factory=dict)\n rules: Set[str] = field(default_factory=set)\n messages: Dict[int, Message] = field(default_factory=dict)\n attachments: List[List[str]] = field(default_factory=list)\n\n async def add(self, rule_name: str, members: Iterable[Member], messages: Iterable[Message]) -> None:\n \"\"\"Adds new rule violation events to the deletion context.\"\"\"\n self.rules.add(rule_name)\n\n for member in members:\n if member.id not in self.members:\n self.members[member.id] = member\n\n for message in messages:\n if message.id not in self.messages:\n self.messages[message.id] = message\n\n # Re-upload attachments\n destination = message.guild.get_channel(Channels.attachment_log)\n urls = await send_attachments(message, destination, link_large=False)\n self.attachments.append(urls)\n\n async def upload_messages(self, actor_id: int, modlog: ModLog) -> None:\n \"\"\"Method that takes care of uploading the queue and posting modlog alert.\"\"\"\n triggered_by_users = \", \".join(format_user(m) for m in self.members.values())\n\n mod_alert_message = (\n f\"**Triggered by:** {triggered_by_users}\\n\"\n f\"**Channel:** {self.channel.mention}\\n\"\n f\"**Rules:** {', '.join(rule for rule in self.rules)}\\n\"\n )\n\n # For multiple messages or those with excessive newlines, use the logs API\n if len(self.messages) > 1 or 'newlines' in self.rules:\n url = await 
modlog.upload_log(self.messages.values(), actor_id, self.attachments)\n mod_alert_message += f\"A complete log of the offending messages can be found [here]({url})\"\n else:\n mod_alert_message += \"Message:\\n\"\n [message] = self.messages.values()\n content = message.clean_content\n remaining_chars = 2040 - len(mod_alert_message)\n\n if len(content) > remaining_chars:\n content = content[:remaining_chars] + \"...\"\n\n mod_alert_message += f\"{content}\"\n\n *_, last_message = self.messages.values()\n await modlog.send_log_message(\n icon_url=Icons.filtering,\n colour=Colour(Colours.soft_red),\n title=\"Spam detected!\",\n text=mod_alert_message,\n thumbnail=last_message.author.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_alerts,\n ping_everyone=AntiSpamConfig.ping_everyone\n )\n\n\nclass AntiSpam(Cog):\n \"\"\"Cog that controls our anti-spam measures.\"\"\"\n\n def __init__(self, bot: Bot, validation_errors: Dict[str, str]) -> None:\n self.bot = bot\n self.validation_errors = validation_errors\n role_id = AntiSpamConfig.punishment['role_id']\n self.muted_role = Object(role_id)\n self.expiration_date_converter = Duration()\n\n self.message_deletion_queue = dict()\n\n self.bot.loop.create_task(self.alert_on_validation_error(), name=\"AntiSpam.alert_on_validation_error\")\n\n @property\n def mod_log(self) -> ModLog:\n \"\"\"Allows for easy access of the ModLog cog.\"\"\"\n return self.bot.get_cog(\"ModLog\")\n\n async def alert_on_validation_error(self) -> None:\n \"\"\"Unloads the cog and alerts admins if configuration validation failed.\"\"\"\n await self.bot.wait_until_guild_available()\n if self.validation_errors:\n body = \"**The following errors were encountered:**\\n\"\n body += \"\\n\".join(f\"- {error}\" for error in self.validation_errors.values())\n body += \"\\n\\n**The cog has been unloaded.**\"\n\n await self.mod_log.send_log_message(\n title=\"Error: AntiSpam configuration validation failed!\",\n text=body,\n ping_everyone=True,\n icon_url=Icons.token_removed,\n colour=Colour.red()\n )\n\n self.bot.remove_cog(self.__class__.__name__)\n return\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Applies the antispam rules to each received message.\"\"\"\n if (\n not message.guild\n or message.guild.id != GuildConfig.id\n or message.author.bot\n or (message.channel.id in Filter.channel_whitelist and not DEBUG_MODE)\n or (any(role.id in Filter.role_whitelist for role in message.author.roles) and not DEBUG_MODE)\n ):\n return\n\n # Fetch the rule configuration with the highest rule interval.\n max_interval_config = max(\n AntiSpamConfig.rules.values(),\n key=itemgetter('interval')\n )\n max_interval = max_interval_config['interval']\n\n # Store history messages since `interval` seconds ago in a list to prevent unnecessary API calls.\n earliest_relevant_at = datetime.utcnow() - timedelta(seconds=max_interval)\n relevant_messages = [\n msg async for msg in message.channel.history(after=earliest_relevant_at, oldest_first=False)\n if not msg.author.bot\n ]\n\n for rule_name in AntiSpamConfig.rules:\n rule_config = AntiSpamConfig.rules[rule_name]\n rule_function = RULE_FUNCTION_MAPPING[rule_name]\n\n # Create a list of messages that were sent in the interval that the rule cares about.\n latest_interesting_stamp = datetime.utcnow() - timedelta(seconds=rule_config['interval'])\n messages_for_rule = [\n msg for msg in relevant_messages if msg.created_at > latest_interesting_stamp\n ]\n result = await rule_function(message, 
messages_for_rule, rule_config)\n\n # If the rule returns `None`, that means the message didn't violate it.\n # If it doesn't, it returns a tuple in the form `(str, Iterable[discord.Member])`\n # which contains the reason for why the message violated the rule and\n # an iterable of all members that violated the rule.\n if result is not None:\n self.bot.stats.incr(f\"mod_alerts.{rule_name}\")\n reason, members, relevant_messages = result\n full_reason = f\"`{rule_name}` rule: {reason}\"\n\n # If there's no spam event going on for this channel, start a new Message Deletion Context\n channel = message.channel\n if channel.id not in self.message_deletion_queue:\n log.trace(f\"Creating queue for channel `{channel.id}`\")\n self.message_deletion_queue[message.channel.id] = DeletionContext(channel)\n scheduling.create_task(\n self._process_deletion_context(message.channel.id),\n name=f\"AntiSpam._process_deletion_context({message.channel.id})\"\n )\n\n # Add the relevant of this trigger to the Deletion Context\n await self.message_deletion_queue[message.channel.id].add(\n rule_name=rule_name,\n members=members,\n messages=relevant_messages\n )\n\n for member in members:\n scheduling.create_task(\n self.punish(message, member, full_reason),\n name=f\"AntiSpam.punish(message={message.id}, member={member.id}, rule={rule_name})\"\n )\n\n await self.maybe_delete_messages(channel, relevant_messages)\n break\n\n @lock.lock_arg(\"antispam.punish\", \"member\", attrgetter(\"id\"))\n async def punish(self, msg: Message, member: Member, reason: str) -> None:\n \"\"\"Punishes the given member for triggering an antispam rule.\"\"\"\n if not any(role.id == self.muted_role.id for role in member.roles):\n remove_role_after = AntiSpamConfig.punishment['remove_after']\n\n # Get context and make sure the bot becomes the actor of infraction by patching the `author` attributes\n context = await self.bot.get_context(msg)\n context.author = self.bot.user\n\n # Since we're going to invoke the tempmute command directly, we need to manually call the converter.\n dt_remove_role_after = await self.expiration_date_converter.convert(context, f\"{remove_role_after}S\")\n await context.invoke(\n self.bot.get_command('tempmute'),\n member,\n dt_remove_role_after,\n reason=reason\n )\n\n async def maybe_delete_messages(self, channel: TextChannel, messages: List[Message]) -> None:\n \"\"\"Cleans the messages if cleaning is configured.\"\"\"\n if AntiSpamConfig.clean_offending:\n # If we have more than one message, we can use bulk delete.\n if len(messages) > 1:\n message_ids = [message.id for message in messages]\n self.mod_log.ignore(Event.message_delete, *message_ids)\n await channel.delete_messages(messages)\n\n # Otherwise, the bulk delete endpoint will throw up.\n # Delete the message directly instead.\n else:\n self.mod_log.ignore(Event.message_delete, messages[0].id)\n try:\n await messages[0].delete()\n except NotFound:\n log.info(f\"Tried to delete message `{messages[0].id}`, but message could not be found.\")\n\n async def _process_deletion_context(self, context_id: int) -> None:\n \"\"\"Processes the Deletion Context queue.\"\"\"\n log.trace(\"Sleeping before processing message deletion queue.\")\n await asyncio.sleep(10)\n\n if context_id not in self.message_deletion_queue:\n log.error(f\"Started processing deletion queue for context `{context_id}`, but it was not found!\")\n return\n\n deletion_context = self.message_deletion_queue.pop(context_id)\n await deletion_context.upload_messages(self.bot.user.id, 
self.mod_log)\n\n\ndef validate_config(rules_: Mapping = AntiSpamConfig.rules) -> Dict[str, str]:\n \"\"\"Validates the antispam configs.\"\"\"\n validation_errors = {}\n for name, config in rules_.items():\n if name not in RULE_FUNCTION_MAPPING:\n log.error(\n f\"Unrecognized antispam rule `{name}`. \"\n f\"Valid rules are: {', '.join(RULE_FUNCTION_MAPPING)}\"\n )\n validation_errors[name] = f\"`{name}` is not recognized as an antispam rule.\"\n continue\n for required_key in ('interval', 'max'):\n if required_key not in config:\n log.error(\n f\"`{required_key}` is required but was not \"\n f\"set in rule `{name}`'s configuration.\"\n )\n validation_errors[name] = f\"Key `{required_key}` is required but not set for rule `{name}`\"\n return validation_errors\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Validate the AntiSpam configs and load the AntiSpam cog.\"\"\"\n validation_errors = validate_config()\n bot.add_cog(AntiSpam(bot, validation_errors))\n", "path": "bot/exts/filters/antispam.py"}]} | 3,747 | 617 |
gh_patches_debug_39505 | rasdani/github-patches | git_diff | Qiskit__qiskit-979 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Calling IBMQ.load_accounts() twice should give warning, not exception
### Information
- **Qiskit Terra version**: latest master
- **Python version**: 3.7
- **Operating system**: OSX
### What is the current behavior?
Calling `IBMQ.load_accounts()` twice gives an exception. However, in notebooks, it is common to just run multiple cells (or whole notebooks) repeatedly. Raising an exception breaks this convenient workflow. A similar issue, #673, was raised and corrected previously.
### Steps to reproduce the problem
### What is the expected behavior?
### Suggested solutions
Make repeated calls to `IBMQ.load_accounts()` give a warning instead.
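
A rough sketch of the warn-and-skip behaviour (names such as `session_accounts` and `append_account` are illustrative placeholders, not the provider's actual attributes):

```python
import warnings

def load_accounts_warn_on_duplicates(session_accounts, discovered_credentials, append_account):
    # session_accounts: dict of unique_id -> provider already in the session
    # discovered_credentials: dict of unique_id -> Credentials found on disk/env
    for unique_id, credentials in discovered_credentials.items():
        if unique_id in session_accounts:
            warnings.warn('Credentials are already in use; skipping duplicate.')
            continue
        append_account(credentials)
```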
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/backends/ibmq/ibmqprovider.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2018, IBM.
4 #
5 # This source code is licensed under the Apache License, Version 2.0 found in
6 # the LICENSE.txt file in the root directory of this source tree.
7
8 """Provider for remote IBMQ backends with admin features."""
9
10 from collections import OrderedDict
11
12 from qiskit.backends import BaseProvider
13
14 from .credentials._configrc import remove_credentials
15 from .credentials import (Credentials,
16 read_credentials_from_qiskitrc, store_credentials, discover_credentials)
17 from .ibmqaccounterror import IBMQAccountError
18 from .ibmqsingleprovider import IBMQSingleProvider
19
20 QE_URL = 'https://quantumexperience.ng.bluemix.net/api'
21
22
23 class IBMQProvider(BaseProvider):
24 """Provider for remote IBMQ backends with admin features.
25
26 This class is the entry point for handling backends from IBMQ, allowing
27 using different accounts.
28 """
29 def __init__(self):
30 super().__init__()
31
32 # dict[credentials_unique_id: IBMQSingleProvider]
33 # This attribute stores a reference to the different accounts. The
34 # keys are tuples (hub, group, project), as the convention is that
35 # that tuple uniquely identifies a set of credentials.
36 self._accounts = OrderedDict()
37
38 def backends(self, name=None, filters=None, **kwargs):
39 # pylint: disable=arguments-differ
40
41 # Special handling of the credentials filters.
42 credentials_filter = {}
43 for key in ['token', 'url', 'hub', 'group', 'project']:
44 if key in kwargs:
45 credentials_filter[key] = kwargs.pop(key)
46 providers = [provider for provider in self._accounts.values() if
47 self._match_all(provider.credentials, credentials_filter)]
48
49 # Special handling of the `name` parameter, to support alias resolution.
50 if name:
51 aliases = self.aliased_backend_names()
52 aliases.update(self.deprecated_backend_names())
53 name = aliases.get(name, name)
54
55 # Aggregate the list of filtered backends.
56 backends = []
57 for provider in providers:
58 backends = backends + provider.backends(
59 name=name, filters=filters, **kwargs)
60
61 return backends
62
63 @staticmethod
64 def deprecated_backend_names():
65 """Returns deprecated backend names."""
66 return {
67 'ibmqx_qasm_simulator': 'ibmq_qasm_simulator',
68 'ibmqx_hpc_qasm_simulator': 'ibmq_qasm_simulator',
69 'real': 'ibmqx1'
70 }
71
72 @staticmethod
73 def aliased_backend_names():
74 """Returns aliased backend names."""
75 return {
76 'ibmq_5_yorktown': 'ibmqx2',
77 'ibmq_5_tenerife': 'ibmqx4',
78 'ibmq_16_rueschlikon': 'ibmqx5',
79 'ibmq_20_austin': 'QS1_1'
80 }
81
82 def add_account(self, token, url=QE_URL, **kwargs):
83 """Authenticate against IBMQ and store the account for future use.
84
85 Login into Quantum Experience or IBMQ using the provided credentials,
86 adding the account to the current session. The account is stored in
87 disk for future use.
88
89 Args:
90 token (str): Quantum Experience or IBM Q API token.
91 url (str): URL for Quantum Experience or IBM Q (for IBM Q,
92 including the hub, group and project in the URL).
93 **kwargs (dict):
94 * proxies (dict): Proxy configuration for the API.
95 * verify (bool): If False, ignores SSL certificates errors
96
97 Raises:
98 IBMQAccountError: if the credentials are already in use.
99 """
100 credentials = Credentials(token, url, **kwargs)
101
102 # Check if duplicated credentials are already stored. By convention,
103 # we assume (hub, group, project) is always unique.
104 stored_credentials = read_credentials_from_qiskitrc()
105
106 if credentials.unique_id() in stored_credentials.keys():
107 raise IBMQAccountError('Credentials are already stored')
108
109 self._append_account(credentials)
110
111 # Store the credentials back to disk.
112 store_credentials(credentials)
113
114 def remove_account(self, token, url=QE_URL, **kwargs):
115 """Remove an account from the session and from disk.
116
117 Args:
118 token (str): Quantum Experience or IBM Q API token.
119 url (str): URL for Quantum Experience or IBM Q (for IBM Q,
120 including the hub, group and project in the URL).
121 **kwargs (dict):
122 * proxies (dict): Proxy configuration for the API.
123 * verify (bool): If False, ignores SSL certificates errors
124
125 Raises:
126 IBMQAccountError: if the credentials could not be removed.
127 """
128 removed = False
129 credentials = Credentials(token, url, **kwargs)
130
131 # Check if the credentials are already stored in session or disk. By
132 # convention, we assume (hub, group, project) is always unique.
133 stored_credentials = read_credentials_from_qiskitrc()
134
135 # Try to remove from session.
136 if credentials.unique_id() in self._accounts.keys():
137 del self._accounts[credentials.unique_id()]
138 removed = True
139
140 # Try to remove from disk.
141 if credentials.unique_id() in stored_credentials.keys():
142 remove_credentials(credentials)
143 removed = True
144
145 if not removed:
146 raise IBMQAccountError('Unable to find credentials')
147
148 def remove_accounts(self):
149 """Remove all accounts from this session and optionally from disk."""
150 current_creds = self._accounts.copy()
151 for creds in current_creds:
152 self.remove_account(current_creds[creds].credentials.token,
153 current_creds[creds].credentials.url)
154
155 def use_account(self, token, url=QE_URL, **kwargs):
156 """Authenticate against IBMQ during this session.
157
158 Login into Quantum Experience or IBMQ using the provided credentials,
159 adding the account to the current session. The account is not stored
160 in disk.
161
162 Args:
163 token (str): Quantum Experience or IBM Q API token.
164 url (str): URL for Quantum Experience or IBM Q (for IBM Q,
165 including the hub, group and project in the URL).
166 **kwargs (dict):
167 * proxies (dict): Proxy configuration for the API.
168 * verify (bool): If False, ignores SSL certificates errors
169 """
170 credentials = Credentials(token, url, **kwargs)
171
172 self._append_account(credentials)
173
174 def list_accounts(self):
175 """List all accounts currently stored in the session.
176
177 Returns:
178 list[dict]: a list with information about the accounts currently
179 in the session.
180 """
181 information = []
182 for provider in self._accounts.values():
183 information.append({
184 'token': provider.credentials.token,
185 'url': provider.credentials.url,
186 })
187
188 return information
189
190 def load_accounts(self, **kwargs):
191 """Load IBMQ accounts found in the system, subject to optional filtering.
192
193 Automatically load the accounts found in the system. This method
194 looks for credentials in the following locations, in order, and
195 returns as soon as credentials are found:
196
197 1. in the `Qconfig.py` file in the current working directory.
198 2. in the environment variables.
199 3. in the `qiskitrc` configuration file
200
201 Raises:
202 IBMQAccountError: if attempting to load previously loaded accounts,
203 or if no credentials can be found.
204 """
205 # Special handling of the credentials filters.
206 credentials_filter = {}
207 for key in ['token', 'url', 'hub', 'group', 'project']:
208 if key in kwargs:
209 credentials_filter[key] = kwargs.pop(key)
210
211 for credentials in discover_credentials().values():
212 if self._match_all(credentials, credentials_filter):
213 self._append_account(credentials)
214
215 if not self._accounts:
216 raise IBMQAccountError('No IBMQ credentials found.')
217
218 def _append_account(self, credentials):
219 """Append an account with the specified credentials to the session.
220
221 Args:
222 credentials (Credentials): set of credentials.
223
224 Returns:
225 IBMQSingleProvider: new single-account provider.
226
227 Raises:
228 IBMQAccountError: if the provider could not be appended.
229 """
230 # Check if duplicated credentials are already in use. By convention,
231 # we assume (hub, group, project) is always unique.
232 if credentials.unique_id() in self._accounts.keys():
233 raise IBMQAccountError('Credentials are already in use')
234
235 single_provider = IBMQSingleProvider(credentials, self)
236 self._accounts[credentials.unique_id()] = single_provider
237
238 return single_provider
239
240 def _match_all(self, obj, criteria):
241 """Return True if all items in criteria matches items in obj."""
242 return all(getattr(obj, key_, None) == value_ for
243 key_, value_ in criteria.items())
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/backends/ibmq/ibmqprovider.py b/qiskit/backends/ibmq/ibmqprovider.py
--- a/qiskit/backends/ibmq/ibmqprovider.py
+++ b/qiskit/backends/ibmq/ibmqprovider.py
@@ -7,6 +7,7 @@
"""Provider for remote IBMQ backends with admin features."""
+import warnings
from collections import OrderedDict
from qiskit.backends import BaseProvider
@@ -93,9 +94,6 @@
**kwargs (dict):
* proxies (dict): Proxy configuration for the API.
* verify (bool): If False, ignores SSL certificates errors
-
- Raises:
- IBMQAccountError: if the credentials are already in use.
"""
credentials = Credentials(token, url, **kwargs)
@@ -104,12 +102,12 @@
stored_credentials = read_credentials_from_qiskitrc()
if credentials.unique_id() in stored_credentials.keys():
- raise IBMQAccountError('Credentials are already stored')
-
- self._append_account(credentials)
+ warnings.warn('Credentials are already stored')
+ else:
+ self._append_account(credentials)
- # Store the credentials back to disk.
- store_credentials(credentials)
+ # Store the credentials back to disk.
+ store_credentials(credentials)
def remove_account(self, token, url=QE_URL, **kwargs):
"""Remove an account from the session and from disk.
@@ -199,8 +197,7 @@
3. in the `qiskitrc` configuration file
Raises:
- IBMQAccountError: if attempting to load previously loaded accounts,
- or if no credentials can be found.
+ IBMQAccountError: if no credentials are found.
"""
# Special handling of the credentials filters.
credentials_filter = {}
@@ -223,14 +220,11 @@
Returns:
IBMQSingleProvider: new single-account provider.
-
- Raises:
- IBMQAccountError: if the provider could not be appended.
"""
# Check if duplicated credentials are already in use. By convention,
# we assume (hub, group, project) is always unique.
if credentials.unique_id() in self._accounts.keys():
- raise IBMQAccountError('Credentials are already in use')
+ warnings.warn('Credentials are already in use.')
single_provider = IBMQSingleProvider(credentials, self)
self._accounts[credentials.unique_id()] = single_provider
| {"golden_diff": "diff --git a/qiskit/backends/ibmq/ibmqprovider.py b/qiskit/backends/ibmq/ibmqprovider.py\n--- a/qiskit/backends/ibmq/ibmqprovider.py\n+++ b/qiskit/backends/ibmq/ibmqprovider.py\n@@ -7,6 +7,7 @@\n \n \"\"\"Provider for remote IBMQ backends with admin features.\"\"\"\n \n+import warnings\n from collections import OrderedDict\n \n from qiskit.backends import BaseProvider\n@@ -93,9 +94,6 @@\n **kwargs (dict):\n * proxies (dict): Proxy configuration for the API.\n * verify (bool): If False, ignores SSL certificates errors\n-\n- Raises:\n- IBMQAccountError: if the credentials are already in use.\n \"\"\"\n credentials = Credentials(token, url, **kwargs)\n \n@@ -104,12 +102,12 @@\n stored_credentials = read_credentials_from_qiskitrc()\n \n if credentials.unique_id() in stored_credentials.keys():\n- raise IBMQAccountError('Credentials are already stored')\n-\n- self._append_account(credentials)\n+ warnings.warn('Credentials are already stored')\n+ else:\n+ self._append_account(credentials)\n \n- # Store the credentials back to disk.\n- store_credentials(credentials)\n+ # Store the credentials back to disk.\n+ store_credentials(credentials)\n \n def remove_account(self, token, url=QE_URL, **kwargs):\n \"\"\"Remove an account from the session and from disk.\n@@ -199,8 +197,7 @@\n 3. in the `qiskitrc` configuration file\n \n Raises:\n- IBMQAccountError: if attempting to load previously loaded accounts,\n- or if no credentials can be found.\n+ IBMQAccountError: if no credentials are found.\n \"\"\"\n # Special handling of the credentials filters.\n credentials_filter = {}\n@@ -223,14 +220,11 @@\n \n Returns:\n IBMQSingleProvider: new single-account provider.\n-\n- Raises:\n- IBMQAccountError: if the provider could not be appended.\n \"\"\"\n # Check if duplicated credentials are already in use. By convention,\n # we assume (hub, group, project) is always unique.\n if credentials.unique_id() in self._accounts.keys():\n- raise IBMQAccountError('Credentials are already in use')\n+ warnings.warn('Credentials are already in use.')\n \n single_provider = IBMQSingleProvider(credentials, self)\n self._accounts[credentials.unique_id()] = single_provider\n", "issue": "Calling IBMQ.load_accounts() twice should give warning, not exception\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Informations\r\n\r\n- **Qiskit Terra version**: latest master\r\n- **Python version**: 3.7 \r\n- **Operating system**: OSX\r\n\r\n### What is the current behavior?\r\nCalling `IBMQ.load_accounts()` twice gives an Exception. However, in notebooks, it is common to just run multiple cells (or whole notebooks) repeatedly. Raising an exception breaks this convenient workflow. 
A similar issue, #673 , was raised and corrected, previously.\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n\r\n### What is the expected behavior?\r\n\r\n\r\n\r\n### Suggested solutions\r\nMake repeated calls to `IBMQ.load_accounts()` give a warning instead.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"Provider for remote IBMQ backends with admin features.\"\"\"\n\nfrom collections import OrderedDict\n\nfrom qiskit.backends import BaseProvider\n\nfrom .credentials._configrc import remove_credentials\nfrom .credentials import (Credentials,\n read_credentials_from_qiskitrc, store_credentials, discover_credentials)\nfrom .ibmqaccounterror import IBMQAccountError\nfrom .ibmqsingleprovider import IBMQSingleProvider\n\nQE_URL = 'https://quantumexperience.ng.bluemix.net/api'\n\n\nclass IBMQProvider(BaseProvider):\n \"\"\"Provider for remote IBMQ backends with admin features.\n\n This class is the entry point for handling backends from IBMQ, allowing\n using different accounts.\n \"\"\"\n def __init__(self):\n super().__init__()\n\n # dict[credentials_unique_id: IBMQSingleProvider]\n # This attribute stores a reference to the different accounts. The\n # keys are tuples (hub, group, project), as the convention is that\n # that tuple uniquely identifies a set of credentials.\n self._accounts = OrderedDict()\n\n def backends(self, name=None, filters=None, **kwargs):\n # pylint: disable=arguments-differ\n\n # Special handling of the credentials filters.\n credentials_filter = {}\n for key in ['token', 'url', 'hub', 'group', 'project']:\n if key in kwargs:\n credentials_filter[key] = kwargs.pop(key)\n providers = [provider for provider in self._accounts.values() if\n self._match_all(provider.credentials, credentials_filter)]\n\n # Special handling of the `name` parameter, to support alias resolution.\n if name:\n aliases = self.aliased_backend_names()\n aliases.update(self.deprecated_backend_names())\n name = aliases.get(name, name)\n\n # Aggregate the list of filtered backends.\n backends = []\n for provider in providers:\n backends = backends + provider.backends(\n name=name, filters=filters, **kwargs)\n\n return backends\n\n @staticmethod\n def deprecated_backend_names():\n \"\"\"Returns deprecated backend names.\"\"\"\n return {\n 'ibmqx_qasm_simulator': 'ibmq_qasm_simulator',\n 'ibmqx_hpc_qasm_simulator': 'ibmq_qasm_simulator',\n 'real': 'ibmqx1'\n }\n\n @staticmethod\n def aliased_backend_names():\n \"\"\"Returns aliased backend names.\"\"\"\n return {\n 'ibmq_5_yorktown': 'ibmqx2',\n 'ibmq_5_tenerife': 'ibmqx4',\n 'ibmq_16_rueschlikon': 'ibmqx5',\n 'ibmq_20_austin': 'QS1_1'\n }\n\n def add_account(self, token, url=QE_URL, **kwargs):\n \"\"\"Authenticate against IBMQ and store the account for future use.\n\n Login into Quantum Experience or IBMQ using the provided credentials,\n adding the account to the current session. 
The account is stored in\n disk for future use.\n\n Args:\n token (str): Quantum Experience or IBM Q API token.\n url (str): URL for Quantum Experience or IBM Q (for IBM Q,\n including the hub, group and project in the URL).\n **kwargs (dict):\n * proxies (dict): Proxy configuration for the API.\n * verify (bool): If False, ignores SSL certificates errors\n\n Raises:\n IBMQAccountError: if the credentials are already in use.\n \"\"\"\n credentials = Credentials(token, url, **kwargs)\n\n # Check if duplicated credentials are already stored. By convention,\n # we assume (hub, group, project) is always unique.\n stored_credentials = read_credentials_from_qiskitrc()\n\n if credentials.unique_id() in stored_credentials.keys():\n raise IBMQAccountError('Credentials are already stored')\n\n self._append_account(credentials)\n\n # Store the credentials back to disk.\n store_credentials(credentials)\n\n def remove_account(self, token, url=QE_URL, **kwargs):\n \"\"\"Remove an account from the session and from disk.\n\n Args:\n token (str): Quantum Experience or IBM Q API token.\n url (str): URL for Quantum Experience or IBM Q (for IBM Q,\n including the hub, group and project in the URL).\n **kwargs (dict):\n * proxies (dict): Proxy configuration for the API.\n * verify (bool): If False, ignores SSL certificates errors\n\n Raises:\n IBMQAccountError: if the credentials could not be removed.\n \"\"\"\n removed = False\n credentials = Credentials(token, url, **kwargs)\n\n # Check if the credentials are already stored in session or disk. By\n # convention, we assume (hub, group, project) is always unique.\n stored_credentials = read_credentials_from_qiskitrc()\n\n # Try to remove from session.\n if credentials.unique_id() in self._accounts.keys():\n del self._accounts[credentials.unique_id()]\n removed = True\n\n # Try to remove from disk.\n if credentials.unique_id() in stored_credentials.keys():\n remove_credentials(credentials)\n removed = True\n\n if not removed:\n raise IBMQAccountError('Unable to find credentials')\n\n def remove_accounts(self):\n \"\"\"Remove all accounts from this session and optionally from disk.\"\"\"\n current_creds = self._accounts.copy()\n for creds in current_creds:\n self.remove_account(current_creds[creds].credentials.token,\n current_creds[creds].credentials.url)\n\n def use_account(self, token, url=QE_URL, **kwargs):\n \"\"\"Authenticate against IBMQ during this session.\n\n Login into Quantum Experience or IBMQ using the provided credentials,\n adding the account to the current session. 
The account is not stored\n in disk.\n\n Args:\n token (str): Quantum Experience or IBM Q API token.\n url (str): URL for Quantum Experience or IBM Q (for IBM Q,\n including the hub, group and project in the URL).\n **kwargs (dict):\n * proxies (dict): Proxy configuration for the API.\n * verify (bool): If False, ignores SSL certificates errors\n \"\"\"\n credentials = Credentials(token, url, **kwargs)\n\n self._append_account(credentials)\n\n def list_accounts(self):\n \"\"\"List all accounts currently stored in the session.\n\n Returns:\n list[dict]: a list with information about the accounts currently\n in the session.\n \"\"\"\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information\n\n def load_accounts(self, **kwargs):\n \"\"\"Load IBMQ accounts found in the system, subject to optional filtering.\n\n Automatically load the accounts found in the system. This method\n looks for credentials in the following locations, in order, and\n returns as soon as credentials are found:\n\n 1. in the `Qconfig.py` file in the current working directory.\n 2. in the environment variables.\n 3. in the `qiskitrc` configuration file\n\n Raises:\n IBMQAccountError: if attempting to load previously loaded accounts,\n or if no credentials can be found.\n \"\"\"\n # Special handling of the credentials filters.\n credentials_filter = {}\n for key in ['token', 'url', 'hub', 'group', 'project']:\n if key in kwargs:\n credentials_filter[key] = kwargs.pop(key)\n\n for credentials in discover_credentials().values():\n if self._match_all(credentials, credentials_filter):\n self._append_account(credentials)\n\n if not self._accounts:\n raise IBMQAccountError('No IBMQ credentials found.')\n\n def _append_account(self, credentials):\n \"\"\"Append an account with the specified credentials to the session.\n\n Args:\n credentials (Credentials): set of credentials.\n\n Returns:\n IBMQSingleProvider: new single-account provider.\n\n Raises:\n IBMQAccountError: if the provider could not be appended.\n \"\"\"\n # Check if duplicated credentials are already in use. 
By convention,\n # we assume (hub, group, project) is always unique.\n if credentials.unique_id() in self._accounts.keys():\n raise IBMQAccountError('Credentials are already in use')\n\n single_provider = IBMQSingleProvider(credentials, self)\n self._accounts[credentials.unique_id()] = single_provider\n\n return single_provider\n\n def _match_all(self, obj, criteria):\n \"\"\"Return True if all items in criteria matches items in obj.\"\"\"\n return all(getattr(obj, key_, None) == value_ for\n key_, value_ in criteria.items())\n", "path": "qiskit/backends/ibmq/ibmqprovider.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"Provider for remote IBMQ backends with admin features.\"\"\"\n\nimport warnings\nfrom collections import OrderedDict\n\nfrom qiskit.backends import BaseProvider\n\nfrom .credentials._configrc import remove_credentials\nfrom .credentials import (Credentials,\n read_credentials_from_qiskitrc, store_credentials, discover_credentials)\nfrom .ibmqaccounterror import IBMQAccountError\nfrom .ibmqsingleprovider import IBMQSingleProvider\n\nQE_URL = 'https://quantumexperience.ng.bluemix.net/api'\n\n\nclass IBMQProvider(BaseProvider):\n \"\"\"Provider for remote IBMQ backends with admin features.\n\n This class is the entry point for handling backends from IBMQ, allowing\n using different accounts.\n \"\"\"\n def __init__(self):\n super().__init__()\n\n # dict[credentials_unique_id: IBMQSingleProvider]\n # This attribute stores a reference to the different accounts. The\n # keys are tuples (hub, group, project), as the convention is that\n # that tuple uniquely identifies a set of credentials.\n self._accounts = OrderedDict()\n\n def backends(self, name=None, filters=None, **kwargs):\n # pylint: disable=arguments-differ\n\n # Special handling of the credentials filters.\n credentials_filter = {}\n for key in ['token', 'url', 'hub', 'group', 'project']:\n if key in kwargs:\n credentials_filter[key] = kwargs.pop(key)\n providers = [provider for provider in self._accounts.values() if\n self._match_all(provider.credentials, credentials_filter)]\n\n # Special handling of the `name` parameter, to support alias resolution.\n if name:\n aliases = self.aliased_backend_names()\n aliases.update(self.deprecated_backend_names())\n name = aliases.get(name, name)\n\n # Aggregate the list of filtered backends.\n backends = []\n for provider in providers:\n backends = backends + provider.backends(\n name=name, filters=filters, **kwargs)\n\n return backends\n\n @staticmethod\n def deprecated_backend_names():\n \"\"\"Returns deprecated backend names.\"\"\"\n return {\n 'ibmqx_qasm_simulator': 'ibmq_qasm_simulator',\n 'ibmqx_hpc_qasm_simulator': 'ibmq_qasm_simulator',\n 'real': 'ibmqx1'\n }\n\n @staticmethod\n def aliased_backend_names():\n \"\"\"Returns aliased backend names.\"\"\"\n return {\n 'ibmq_5_yorktown': 'ibmqx2',\n 'ibmq_5_tenerife': 'ibmqx4',\n 'ibmq_16_rueschlikon': 'ibmqx5',\n 'ibmq_20_austin': 'QS1_1'\n }\n\n def add_account(self, token, url=QE_URL, **kwargs):\n \"\"\"Authenticate against IBMQ and store the account for future use.\n\n Login into Quantum Experience or IBMQ using the provided credentials,\n adding the account to the current session. 
The account is stored in\n disk for future use.\n\n Args:\n token (str): Quantum Experience or IBM Q API token.\n url (str): URL for Quantum Experience or IBM Q (for IBM Q,\n including the hub, group and project in the URL).\n **kwargs (dict):\n * proxies (dict): Proxy configuration for the API.\n * verify (bool): If False, ignores SSL certificates errors\n \"\"\"\n credentials = Credentials(token, url, **kwargs)\n\n # Check if duplicated credentials are already stored. By convention,\n # we assume (hub, group, project) is always unique.\n stored_credentials = read_credentials_from_qiskitrc()\n\n if credentials.unique_id() in stored_credentials.keys():\n warnings.warn('Credentials are already stored')\n else:\n self._append_account(credentials)\n\n # Store the credentials back to disk.\n store_credentials(credentials)\n\n def remove_account(self, token, url=QE_URL, **kwargs):\n \"\"\"Remove an account from the session and from disk.\n\n Args:\n token (str): Quantum Experience or IBM Q API token.\n url (str): URL for Quantum Experience or IBM Q (for IBM Q,\n including the hub, group and project in the URL).\n **kwargs (dict):\n * proxies (dict): Proxy configuration for the API.\n * verify (bool): If False, ignores SSL certificates errors\n\n Raises:\n IBMQAccountError: if the credentials could not be removed.\n \"\"\"\n removed = False\n credentials = Credentials(token, url, **kwargs)\n\n # Check if the credentials are already stored in session or disk. By\n # convention, we assume (hub, group, project) is always unique.\n stored_credentials = read_credentials_from_qiskitrc()\n\n # Try to remove from session.\n if credentials.unique_id() in self._accounts.keys():\n del self._accounts[credentials.unique_id()]\n removed = True\n\n # Try to remove from disk.\n if credentials.unique_id() in stored_credentials.keys():\n remove_credentials(credentials)\n removed = True\n\n if not removed:\n raise IBMQAccountError('Unable to find credentials')\n\n def remove_accounts(self):\n \"\"\"Remove all accounts from this session and optionally from disk.\"\"\"\n current_creds = self._accounts.copy()\n for creds in current_creds:\n self.remove_account(current_creds[creds].credentials.token,\n current_creds[creds].credentials.url)\n\n def use_account(self, token, url=QE_URL, **kwargs):\n \"\"\"Authenticate against IBMQ during this session.\n\n Login into Quantum Experience or IBMQ using the provided credentials,\n adding the account to the current session. The account is not stored\n in disk.\n\n Args:\n token (str): Quantum Experience or IBM Q API token.\n url (str): URL for Quantum Experience or IBM Q (for IBM Q,\n including the hub, group and project in the URL).\n **kwargs (dict):\n * proxies (dict): Proxy configuration for the API.\n * verify (bool): If False, ignores SSL certificates errors\n \"\"\"\n credentials = Credentials(token, url, **kwargs)\n\n self._append_account(credentials)\n\n def list_accounts(self):\n \"\"\"List all accounts currently stored in the session.\n\n Returns:\n list[dict]: a list with information about the accounts currently\n in the session.\n \"\"\"\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information\n\n def load_accounts(self, **kwargs):\n \"\"\"Load IBMQ accounts found in the system, subject to optional filtering.\n\n Automatically load the accounts found in the system. 
This method\n looks for credentials in the following locations, in order, and\n returns as soon as credentials are found:\n\n 1. in the `Qconfig.py` file in the current working directory.\n 2. in the environment variables.\n 3. in the `qiskitrc` configuration file\n\n Raises:\n IBMQAccountError: if no credentials are found.\n \"\"\"\n # Special handling of the credentials filters.\n credentials_filter = {}\n for key in ['token', 'url', 'hub', 'group', 'project']:\n if key in kwargs:\n credentials_filter[key] = kwargs.pop(key)\n\n for credentials in discover_credentials().values():\n if self._match_all(credentials, credentials_filter):\n self._append_account(credentials)\n\n if not self._accounts:\n raise IBMQAccountError('No IBMQ credentials found.')\n\n def _append_account(self, credentials):\n \"\"\"Append an account with the specified credentials to the session.\n\n Args:\n credentials (Credentials): set of credentials.\n\n Returns:\n IBMQSingleProvider: new single-account provider.\n \"\"\"\n # Check if duplicated credentials are already in use. By convention,\n # we assume (hub, group, project) is always unique.\n if credentials.unique_id() in self._accounts.keys():\n warnings.warn('Credentials are already in use.')\n\n single_provider = IBMQSingleProvider(credentials, self)\n self._accounts[credentials.unique_id()] = single_provider\n\n return single_provider\n\n def _match_all(self, obj, criteria):\n \"\"\"Return True if all items in criteria matches items in obj.\"\"\"\n return all(getattr(obj, key_, None) == value_ for\n key_, value_ in criteria.items())\n", "path": "qiskit/backends/ibmq/ibmqprovider.py"}]} | 2,998 | 561 |
gh_patches_debug_11353 | rasdani/github-patches | git_diff | iterative__dvc-951 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Testing against Python 3.6 and 3.7
https://github.com/pyinstaller/pyinstaller#main-advantages says that pyinstaller supports these versions of Python but https://github.com/iterative/dvc/blob/master/.travis.yml#L9 says the opposite.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/remote/base.py`
Content:
```
1 import os
2 import re
3
4 from dvc.config import Config
5 from dvc.logger import Logger
6 from dvc.exceptions import DvcException
7
8
9 STATUS_OK = 1
10 STATUS_NEW = 3
11 STATUS_DELETED = 4
12
13
14 STATUS_MAP = {
15 # (local_exists, remote_exists)
16 (True, True): STATUS_OK,
17 (False, False): STATUS_OK,
18 (True, False): STATUS_NEW,
19 (False, True): STATUS_DELETED,
20 }
21
22
23 class DataCloudError(DvcException):
24 """ Data Cloud exception """
25 def __init__(self, msg):
26 super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))
27
28
29 class RemoteBase(object):
30 REGEX = None
31 REQUIRES = {}
32
33 def __init__(self, project, config):
34 pass
35
36 @classmethod
37 def supported(cls, config):
38 url = config[Config.SECTION_REMOTE_URL]
39 url_ok = cls.match(url)
40 deps_ok = all(cls.REQUIRES.values())
41 if url_ok and not deps_ok:
42 missing = [k for k, v in cls.REQUIRES.items() if v is None]
43 msg = "URL \'{}\' is supported but requires " \
44 "these missing dependencies: {}"
45 Logger.warn(msg.format(url, str(missing)))
46 return url_ok and deps_ok
47
48 @classmethod
49 def match(cls, url):
50 return re.match(cls.REGEX, url)
51
52 def group(self, name):
53 m = self.match(self.url)
54 if not m:
55 return None
56 return m.group(name)
57
58 @staticmethod
59 def tmp_file(fname):
60 """ Temporary name for a partial download """
61 # FIXME probably better use uuid()
62 return fname + '.part'
63
64 def save_info(self, path_info):
65 raise NotImplementedError
66
67 def save(self, path_info):
68 raise NotImplementedError
69
70 def checkout(self, path_info, checksum_info):
71 raise NotImplementedError
72
73 def download(self, from_infos, to_infos, no_progress_bar=False, name=None):
74 raise NotImplementedError
75
76 def upload(self, from_infos, to_infos, path_info, name=None):
77 raise NotImplementedError
78
79 def remove(self, path_info):
80 raise NotImplementedError
81
82 def move(self, path_info):
83 raise NotImplementedError
84
85 def _makedirs(self, fname):
86 dname = os.path.dirname(fname)
87 try:
88 os.makedirs(dname)
89 except OSError as e:
90 if e.errno != os.errno.EEXIST:
91 raise
92
93 def md5s_to_path_infos(self, md5s):
94 raise NotImplementedError
95
96 def exists(self, path_infos):
97 raise NotImplementedError
98
99 @classmethod
100 def _verify_path_args(cls, from_infos, to_infos, names=None):
101 assert isinstance(from_infos, list)
102 assert isinstance(to_infos, list)
103 assert len(from_infos) == len(to_infos)
104
105 if not names:
106 names = len(to_infos) * [None]
107 else:
108 assert isinstance(names, list)
109 assert len(names) == len(to_infos)
110
111 return names
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/remote/base.py b/dvc/remote/base.py
--- a/dvc/remote/base.py
+++ b/dvc/remote/base.py
@@ -1,5 +1,6 @@
import os
import re
+import errno
from dvc.config import Config
from dvc.logger import Logger
@@ -84,10 +85,11 @@
def _makedirs(self, fname):
dname = os.path.dirname(fname)
+
try:
os.makedirs(dname)
except OSError as e:
- if e.errno != os.errno.EEXIST:
+ if e.errno != errno.EEXIST:
raise
def md5s_to_path_infos(self, md5s):
| {"golden_diff": "diff --git a/dvc/remote/base.py b/dvc/remote/base.py\n--- a/dvc/remote/base.py\n+++ b/dvc/remote/base.py\n@@ -1,5 +1,6 @@\n import os\n import re\n+import errno\n \n from dvc.config import Config\n from dvc.logger import Logger\n@@ -84,10 +85,11 @@\n \n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n+\n try:\n os.makedirs(dname)\n except OSError as e:\n- if e.errno != os.errno.EEXIST:\n+ if e.errno != errno.EEXIST:\n raise\n \n def md5s_to_path_infos(self, md5s):\n", "issue": "Testing against Python 3.6 and 3.7\nhttps://github.com/pyinstaller/pyinstaller#main-advantages says that pyinstaller supports these versions of Python but https://github.com/iterative/dvc/blob/master/.travis.yml#L9 says the opposite.\n", "before_files": [{"content": "import os\nimport re\n\nfrom dvc.config import Config\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\n\n\nSTATUS_OK = 1\nSTATUS_NEW = 3\nSTATUS_DELETED = 4\n\n\nSTATUS_MAP = {\n # (local_exists, remote_exists)\n (True, True): STATUS_OK,\n (False, False): STATUS_OK,\n (True, False): STATUS_NEW,\n (False, True): STATUS_DELETED,\n}\n\n\nclass DataCloudError(DvcException):\n \"\"\" Data Cloud exception \"\"\"\n def __init__(self, msg):\n super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))\n\n\nclass RemoteBase(object):\n REGEX = None\n REQUIRES = {}\n\n def __init__(self, project, config):\n pass\n\n @classmethod\n def supported(cls, config):\n url = config[Config.SECTION_REMOTE_URL]\n url_ok = cls.match(url)\n deps_ok = all(cls.REQUIRES.values())\n if url_ok and not deps_ok:\n missing = [k for k, v in cls.REQUIRES.items() if v is None]\n msg = \"URL \\'{}\\' is supported but requires \" \\\n \"these missing dependencies: {}\"\n Logger.warn(msg.format(url, str(missing)))\n return url_ok and deps_ok\n\n @classmethod\n def match(cls, url):\n return re.match(cls.REGEX, url)\n\n def group(self, name):\n m = self.match(self.url)\n if not m:\n return None\n return m.group(name)\n\n @staticmethod\n def tmp_file(fname):\n \"\"\" Temporary name for a partial download \"\"\"\n # FIXME probably better use uuid()\n return fname + '.part'\n\n def save_info(self, path_info):\n raise NotImplementedError\n\n def save(self, path_info):\n raise NotImplementedError\n\n def checkout(self, path_info, checksum_info):\n raise NotImplementedError\n\n def download(self, from_infos, to_infos, no_progress_bar=False, name=None):\n raise NotImplementedError\n\n def upload(self, from_infos, to_infos, path_info, name=None):\n raise NotImplementedError\n\n def remove(self, path_info):\n raise NotImplementedError\n\n def move(self, path_info):\n raise NotImplementedError\n\n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n try:\n os.makedirs(dname)\n except OSError as e:\n if e.errno != os.errno.EEXIST:\n raise\n\n def md5s_to_path_infos(self, md5s):\n raise NotImplementedError\n\n def exists(self, path_infos):\n raise NotImplementedError\n\n @classmethod\n def _verify_path_args(cls, from_infos, to_infos, names=None):\n assert isinstance(from_infos, list)\n assert isinstance(to_infos, list)\n assert len(from_infos) == len(to_infos)\n\n if not names:\n names = len(to_infos) * [None]\n else:\n assert isinstance(names, list)\n assert len(names) == len(to_infos)\n\n return names\n", "path": "dvc/remote/base.py"}], "after_files": [{"content": "import os\nimport re\nimport errno\n\nfrom dvc.config import Config\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\n\n\nSTATUS_OK = 
1\nSTATUS_NEW = 3\nSTATUS_DELETED = 4\n\n\nSTATUS_MAP = {\n # (local_exists, remote_exists)\n (True, True): STATUS_OK,\n (False, False): STATUS_OK,\n (True, False): STATUS_NEW,\n (False, True): STATUS_DELETED,\n}\n\n\nclass DataCloudError(DvcException):\n \"\"\" Data Cloud exception \"\"\"\n def __init__(self, msg):\n super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))\n\n\nclass RemoteBase(object):\n REGEX = None\n REQUIRES = {}\n\n def __init__(self, project, config):\n pass\n\n @classmethod\n def supported(cls, config):\n url = config[Config.SECTION_REMOTE_URL]\n url_ok = cls.match(url)\n deps_ok = all(cls.REQUIRES.values())\n if url_ok and not deps_ok:\n missing = [k for k, v in cls.REQUIRES.items() if v is None]\n msg = \"URL \\'{}\\' is supported but requires \" \\\n \"these missing dependencies: {}\"\n Logger.warn(msg.format(url, str(missing)))\n return url_ok and deps_ok\n\n @classmethod\n def match(cls, url):\n return re.match(cls.REGEX, url)\n\n def group(self, name):\n m = self.match(self.url)\n if not m:\n return None\n return m.group(name)\n\n @staticmethod\n def tmp_file(fname):\n \"\"\" Temporary name for a partial download \"\"\"\n # FIXME probably better use uuid()\n return fname + '.part'\n\n def save_info(self, path_info):\n raise NotImplementedError\n\n def save(self, path_info):\n raise NotImplementedError\n\n def checkout(self, path_info, checksum_info):\n raise NotImplementedError\n\n def download(self, from_infos, to_infos, no_progress_bar=False, name=None):\n raise NotImplementedError\n\n def upload(self, from_infos, to_infos, path_info, name=None):\n raise NotImplementedError\n\n def remove(self, path_info):\n raise NotImplementedError\n\n def move(self, path_info):\n raise NotImplementedError\n\n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n\n try:\n os.makedirs(dname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n def md5s_to_path_infos(self, md5s):\n raise NotImplementedError\n\n def exists(self, path_infos):\n raise NotImplementedError\n\n @classmethod\n def _verify_path_args(cls, from_infos, to_infos, names=None):\n assert isinstance(from_infos, list)\n assert isinstance(to_infos, list)\n assert len(from_infos) == len(to_infos)\n\n if not names:\n names = len(to_infos) * [None]\n else:\n assert isinstance(names, list)\n assert len(names) == len(to_infos)\n\n return names\n", "path": "dvc/remote/base.py"}]} | 1,210 | 160 |
gh_patches_debug_4392 | rasdani/github-patches | git_diff | encode__starlette-1940 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
middleware causes exceptions to not be raised/handled silently
### Discussed in https://github.com/encode/starlette/discussions/1976
<div type='discussions-op-text'>
<sup>Originally posted by **fraser-langton** December 6, 2022</sup>
From [issue in FastAPI](https://github.com/tiangolo/fastapi/issues/5173) - issue was said to be from starlette
It was initially noticed in FastAPI 0.74.0 and was fixed after 0.79.0, but it has since regressed (in FastAPI 0.88.0 it isn't working).
When exceptions are raised in a sub-app, the exceptions are not propagated all the way, so the traceback is not seen in the console.
In the example code:
/info raises an exception and the full stack trace is seen in the console.
/private/info does not surface the exception; only `INFO: 127.0.0.1:56308 - "GET /info HTTP/1.1" 500 Internal Server Error` is shown in the console.
```python
import uvicorn
from fastapi import FastAPI
from starlette.middleware.base import BaseHTTPMiddleware
app = FastAPI()
@app.get("/info")
def info():
# raises Exception as expected, the traceback is seen in console
raise Exception
private_api = FastAPI()
@private_api.get("/info")
def info():
# exception is handled silently, no traceback is seen in console
raise Exception
app.mount("/private", private_api)
class Middleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
return await call_next(request)
app.add_middleware(Middleware) # when this is removed, the exceptions are raised for all routes
if __name__ == '__main__':
uvicorn.run(app, port=8000)
```
</div>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/base.py`
Content:
```
1 import typing
2
3 import anyio
4
5 from starlette.requests import Request
6 from starlette.responses import Response, StreamingResponse
7 from starlette.types import ASGIApp, Message, Receive, Scope, Send
8
9 RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]
10 DispatchFunction = typing.Callable[
11 [Request, RequestResponseEndpoint], typing.Awaitable[Response]
12 ]
13 T = typing.TypeVar("T")
14
15
16 class BaseHTTPMiddleware:
17 def __init__(
18 self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None
19 ) -> None:
20 self.app = app
21 self.dispatch_func = self.dispatch if dispatch is None else dispatch
22
23 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
24 if scope["type"] != "http":
25 await self.app(scope, receive, send)
26 return
27
28 response_sent = anyio.Event()
29
30 async def call_next(request: Request) -> Response:
31 app_exc: typing.Optional[Exception] = None
32 send_stream, recv_stream = anyio.create_memory_object_stream()
33
34 async def receive_or_disconnect() -> Message:
35 if response_sent.is_set():
36 return {"type": "http.disconnect"}
37
38 async with anyio.create_task_group() as task_group:
39
40 async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T:
41 result = await func()
42 task_group.cancel_scope.cancel()
43 return result
44
45 task_group.start_soon(wrap, response_sent.wait)
46 message = await wrap(request.receive)
47
48 if response_sent.is_set():
49 return {"type": "http.disconnect"}
50
51 return message
52
53 async def close_recv_stream_on_response_sent() -> None:
54 await response_sent.wait()
55 recv_stream.close()
56
57 async def send_no_error(message: Message) -> None:
58 try:
59 await send_stream.send(message)
60 except anyio.BrokenResourceError:
61 # recv_stream has been closed, i.e. response_sent has been set.
62 return
63
64 async def coro() -> None:
65 nonlocal app_exc
66
67 async with send_stream:
68 try:
69 await self.app(scope, receive_or_disconnect, send_no_error)
70 except Exception as exc:
71 app_exc = exc
72
73 task_group.start_soon(close_recv_stream_on_response_sent)
74 task_group.start_soon(coro)
75
76 try:
77 message = await recv_stream.receive()
78 except anyio.EndOfStream:
79 if app_exc is not None:
80 raise app_exc
81 raise RuntimeError("No response returned.")
82
83 assert message["type"] == "http.response.start"
84
85 async def body_stream() -> typing.AsyncGenerator[bytes, None]:
86 async with recv_stream:
87 async for message in recv_stream:
88 assert message["type"] == "http.response.body"
89 body = message.get("body", b"")
90 if body:
91 yield body
92 if not message.get("more_body", False):
93 break
94
95 if app_exc is not None:
96 raise app_exc
97
98 response = StreamingResponse(
99 status_code=message["status"], content=body_stream()
100 )
101 response.raw_headers = message["headers"]
102 return response
103
104 async with anyio.create_task_group() as task_group:
105 request = Request(scope, receive=receive)
106 response = await self.dispatch_func(request, call_next)
107 await response(scope, receive, send)
108 response_sent.set()
109
110 async def dispatch(
111 self, request: Request, call_next: RequestResponseEndpoint
112 ) -> Response:
113 raise NotImplementedError() # pragma: no cover
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py
--- a/starlette/middleware/base.py
+++ b/starlette/middleware/base.py
@@ -89,8 +89,6 @@
body = message.get("body", b"")
if body:
yield body
- if not message.get("more_body", False):
- break
if app_exc is not None:
raise app_exc
| {"golden_diff": "diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py\n--- a/starlette/middleware/base.py\n+++ b/starlette/middleware/base.py\n@@ -89,8 +89,6 @@\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n- if not message.get(\"more_body\", False):\n- break\n \n if app_exc is not None:\n raise app_exc\n", "issue": "middleware causes exceptions to not be raised/handled silently\n### Discussed in https://github.com/encode/starlette/discussions/1976\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **fraser-langton** December 6, 2022</sup>\r\nFrom [issue in FastAPI](https://github.com/tiangolo/fastapi/issues/5173) - issue was said to be from starlette\r\n\r\nWas initially noticed in FastAPI 0.74.0, was fixed after 0.79.0 but has since regressed (FastAPI 0.88.0 it isn't working)\r\n\r\nWhen exceptions are raised on a subapp, the exceptions are not propagated all the way to see in console\r\nIn the example code\r\n/info raises an exception and the full stacktrace is seen in console\r\n/private/info does not raise the exception and only `INFO: 127.0.0.1:56308 - \"GET /info HTTP/1.1\" 500 Internal Server Error` is shown in console\r\n\r\n```python\r\nimport uvicorn\r\nfrom fastapi import FastAPI\r\nfrom starlette.middleware.base import BaseHTTPMiddleware\r\n\r\n\r\napp = FastAPI()\r\n\r\n\r\[email protected](\"/info\")\r\ndef info():\r\n # raises Exception as expected, the traceback is seen in console\r\n raise Exception\r\n\r\n\r\nprivate_api = FastAPI()\r\n\r\n\r\n@private_api.get(\"/info\")\r\ndef info():\r\n # exception is handled silently, no traceback is seen in console\r\n raise Exception\r\n\r\n\r\napp.mount(\"/private\", private_api)\r\n\r\n\r\nclass Middleware(BaseHTTPMiddleware):\r\n\r\n async def dispatch(self, request, call_next):\r\n return await call_next(request)\r\n\r\n\r\napp.add_middleware(Middleware) # when this is removed, the exceptions are raised for all routes\r\n\r\n\r\nif __name__ == '__main__':\r\n uvicorn.run(app, port=8000)\r\n```\r\n</div>\n", "before_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\nT = typing.TypeVar(\"T\")\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n response_sent = anyio.Event()\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def receive_or_disconnect() -> Message:\n if response_sent.is_set():\n return {\"type\": \"http.disconnect\"}\n\n async with anyio.create_task_group() as task_group:\n\n async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T:\n result = await func()\n task_group.cancel_scope.cancel()\n return result\n\n task_group.start_soon(wrap, response_sent.wait)\n message = await wrap(request.receive)\n\n if response_sent.is_set():\n return 
{\"type\": \"http.disconnect\"}\n\n return message\n\n async def close_recv_stream_on_response_sent() -> None:\n await response_sent.wait()\n recv_stream.close()\n\n async def send_no_error(message: Message) -> None:\n try:\n await send_stream.send(message)\n except anyio.BrokenResourceError:\n # recv_stream has been closed, i.e. response_sent has been set.\n return\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, receive_or_disconnect, send_no_error)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(close_recv_stream_on_response_sent)\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n response_sent.set()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}], "after_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\nT = typing.TypeVar(\"T\")\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n response_sent = anyio.Event()\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def receive_or_disconnect() -> Message:\n if response_sent.is_set():\n return {\"type\": \"http.disconnect\"}\n\n async with anyio.create_task_group() as task_group:\n\n async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T:\n result = await func()\n task_group.cancel_scope.cancel()\n return result\n\n task_group.start_soon(wrap, response_sent.wait)\n message = await wrap(request.receive)\n\n if response_sent.is_set():\n return {\"type\": \"http.disconnect\"}\n\n return message\n\n async def close_recv_stream_on_response_sent() -> None:\n await response_sent.wait()\n recv_stream.close()\n\n async def send_no_error(message: Message) -> None:\n try:\n await send_stream.send(message)\n except 
anyio.BrokenResourceError:\n # recv_stream has been closed, i.e. response_sent has been set.\n return\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, receive_or_disconnect, send_no_error)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(close_recv_stream_on_response_sent)\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n response_sent.set()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}]} | 1,682 | 96 |
gh_patches_debug_59726 | rasdani/github-patches | git_diff | pytorch__audio-755 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🚀 Feature Request: Opus audio format support
## 🚀 Feature
<!-- -->
Add opus format
## Motivation
<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
Opus is a good and popular lossy audio coding format. A lot of audio files are stored in the Opus format, but it's currently impossible to load them with torchaudio.load().
## Pitch
<!-- A clear and concise description of what you want to happen. -->
Please make it possible to do
audio = torchaudio.load("my_audio.opus")
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `build_tools/setup_helpers/extension.py`
Content:
```
1 import os
2 import platform
3 import subprocess
4 from pathlib import Path
5
6 from torch.utils.cpp_extension import (
7 CppExtension,
8 BuildExtension as TorchBuildExtension
9 )
10
11 __all__ = [
12 'get_ext_modules',
13 'BuildExtension',
14 ]
15
16 _THIS_DIR = Path(__file__).parent.resolve()
17 _ROOT_DIR = _THIS_DIR.parent.parent.resolve()
18 _CSRC_DIR = _ROOT_DIR / 'torchaudio' / 'csrc'
19 _TP_BASE_DIR = _ROOT_DIR / 'third_party'
20 _TP_INSTALL_DIR = _TP_BASE_DIR / 'install'
21
22
23 def _get_build_sox():
24 val = os.environ.get('BUILD_SOX', '0')
25 trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']
26 falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']
27 if val in trues:
28 return True
29 if val not in falses:
30 print(
31 f'WARNING: Unexpected environment variable value `BUILD_SOX={val}`. '
32 f'Expected one of {trues + falses}')
33 return False
34
35
36 _BUILD_SOX = _get_build_sox()
37
38
39 def _get_eca(debug):
40 eca = []
41 if debug:
42 eca += ["-O0", "-g"]
43 else:
44 eca += ["-O3"]
45 return eca
46
47
48 def _get_ela(debug):
49 ela = []
50 if debug:
51 if platform.system() == "Windows":
52 ela += ["/DEBUG:FULL"]
53 else:
54 ela += ["-O0", "-g"]
55 else:
56 ela += ["-O3"]
57 return ela
58
59
60 def _get_srcs():
61 return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]
62
63
64 def _get_include_dirs():
65 dirs = [
66 str(_ROOT_DIR),
67 ]
68 if _BUILD_SOX:
69 dirs.append(str(_TP_INSTALL_DIR / 'include'))
70 return dirs
71
72
73 def _get_extra_objects():
74 objs = []
75 if _BUILD_SOX:
76 # NOTE: The order of the library listed bellow matters.
77 #
78 # (the most important thing is that dependencies come after a library
79 # e.g., sox comes first, flac/vorbis comes before ogg, and
80 # vorbisenc/vorbisfile comes before vorbis
81 libs = [
82 'libsox.a',
83 'libmad.a',
84 'libFLAC.a',
85 'libmp3lame.a',
86 'libvorbisenc.a',
87 'libvorbisfile.a',
88 'libvorbis.a',
89 'libogg.a',
90 ]
91 for lib in libs:
92 objs.append(str(_TP_INSTALL_DIR / 'lib' / lib))
93 return objs
94
95
96 def _get_libraries():
97 return [] if _BUILD_SOX else ['sox']
98
99
100 def _build_third_party():
101 build_dir = str(_TP_BASE_DIR / 'build')
102 os.makedirs(build_dir, exist_ok=True)
103 subprocess.run(
104 args=['cmake', '..'],
105 cwd=build_dir,
106 check=True,
107 )
108 subprocess.run(
109 args=['cmake', '--build', '.'],
110 cwd=build_dir,
111 check=True,
112 )
113
114
115 _EXT_NAME = 'torchaudio._torchaudio'
116
117
118 def get_ext_modules(debug=False):
119 if platform.system() == 'Windows':
120 return None
121 return [
122 CppExtension(
123 _EXT_NAME,
124 _get_srcs(),
125 libraries=_get_libraries(),
126 include_dirs=_get_include_dirs(),
127 extra_compile_args=_get_eca(debug),
128 extra_objects=_get_extra_objects(),
129 extra_link_args=_get_ela(debug),
130 ),
131 ]
132
133
134 class BuildExtension(TorchBuildExtension):
135 def build_extension(self, ext):
136 if ext.name == _EXT_NAME and _BUILD_SOX:
137 _build_third_party()
138 super().build_extension(ext)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/build_tools/setup_helpers/extension.py b/build_tools/setup_helpers/extension.py
--- a/build_tools/setup_helpers/extension.py
+++ b/build_tools/setup_helpers/extension.py
@@ -83,6 +83,8 @@
'libmad.a',
'libFLAC.a',
'libmp3lame.a',
+ 'libopusfile.a',
+ 'libopus.a',
'libvorbisenc.a',
'libvorbisfile.a',
'libvorbis.a',
| {"golden_diff": "diff --git a/build_tools/setup_helpers/extension.py b/build_tools/setup_helpers/extension.py\n--- a/build_tools/setup_helpers/extension.py\n+++ b/build_tools/setup_helpers/extension.py\n@@ -83,6 +83,8 @@\n 'libmad.a',\n 'libFLAC.a',\n 'libmp3lame.a',\n+ 'libopusfile.a',\n+ 'libopus.a',\n 'libvorbisenc.a',\n 'libvorbisfile.a',\n 'libvorbis.a',\n", "issue": "\ud83d\ude80 Feature Request: Opus audio format support\n## \ud83d\ude80 Feature\r\n<!-- -->\r\nAdd opus format \r\n## Motivation\r\n\r\n<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->\r\nOpus is a good and popular lossy audio coding format. A lot of audio files are stored in opus format but it's impossible to use it with torchaudio.load(). \r\n## Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\nPlease, make possible to do \r\naudio = torchaudio.load(\"my_audio.opus\")\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nfrom torch.utils.cpp_extension import (\n CppExtension,\n BuildExtension as TorchBuildExtension\n)\n\n__all__ = [\n 'get_ext_modules',\n 'BuildExtension',\n]\n\n_THIS_DIR = Path(__file__).parent.resolve()\n_ROOT_DIR = _THIS_DIR.parent.parent.resolve()\n_CSRC_DIR = _ROOT_DIR / 'torchaudio' / 'csrc'\n_TP_BASE_DIR = _ROOT_DIR / 'third_party'\n_TP_INSTALL_DIR = _TP_BASE_DIR / 'install'\n\n\ndef _get_build_sox():\n val = os.environ.get('BUILD_SOX', '0')\n trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']\n falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']\n if val in trues:\n return True\n if val not in falses:\n print(\n f'WARNING: Unexpected environment variable value `BUILD_SOX={val}`. 
'\n f'Expected one of {trues + falses}')\n return False\n\n\n_BUILD_SOX = _get_build_sox()\n\n\ndef _get_eca(debug):\n eca = []\n if debug:\n eca += [\"-O0\", \"-g\"]\n else:\n eca += [\"-O3\"]\n return eca\n\n\ndef _get_ela(debug):\n ela = []\n if debug:\n if platform.system() == \"Windows\":\n ela += [\"/DEBUG:FULL\"]\n else:\n ela += [\"-O0\", \"-g\"]\n else:\n ela += [\"-O3\"]\n return ela\n\n\ndef _get_srcs():\n return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]\n\n\ndef _get_include_dirs():\n dirs = [\n str(_ROOT_DIR),\n ]\n if _BUILD_SOX:\n dirs.append(str(_TP_INSTALL_DIR / 'include'))\n return dirs\n\n\ndef _get_extra_objects():\n objs = []\n if _BUILD_SOX:\n # NOTE: The order of the library listed bellow matters.\n #\n # (the most important thing is that dependencies come after a library\n # e.g., sox comes first, flac/vorbis comes before ogg, and\n # vorbisenc/vorbisfile comes before vorbis\n libs = [\n 'libsox.a',\n 'libmad.a',\n 'libFLAC.a',\n 'libmp3lame.a',\n 'libvorbisenc.a',\n 'libvorbisfile.a',\n 'libvorbis.a',\n 'libogg.a',\n ]\n for lib in libs:\n objs.append(str(_TP_INSTALL_DIR / 'lib' / lib))\n return objs\n\n\ndef _get_libraries():\n return [] if _BUILD_SOX else ['sox']\n\n\ndef _build_third_party():\n build_dir = str(_TP_BASE_DIR / 'build')\n os.makedirs(build_dir, exist_ok=True)\n subprocess.run(\n args=['cmake', '..'],\n cwd=build_dir,\n check=True,\n )\n subprocess.run(\n args=['cmake', '--build', '.'],\n cwd=build_dir,\n check=True,\n )\n\n\n_EXT_NAME = 'torchaudio._torchaudio'\n\n\ndef get_ext_modules(debug=False):\n if platform.system() == 'Windows':\n return None\n return [\n CppExtension(\n _EXT_NAME,\n _get_srcs(),\n libraries=_get_libraries(),\n include_dirs=_get_include_dirs(),\n extra_compile_args=_get_eca(debug),\n extra_objects=_get_extra_objects(),\n extra_link_args=_get_ela(debug),\n ),\n ]\n\n\nclass BuildExtension(TorchBuildExtension):\n def build_extension(self, ext):\n if ext.name == _EXT_NAME and _BUILD_SOX:\n _build_third_party()\n super().build_extension(ext)\n", "path": "build_tools/setup_helpers/extension.py"}], "after_files": [{"content": "import os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nfrom torch.utils.cpp_extension import (\n CppExtension,\n BuildExtension as TorchBuildExtension\n)\n\n__all__ = [\n 'get_ext_modules',\n 'BuildExtension',\n]\n\n_THIS_DIR = Path(__file__).parent.resolve()\n_ROOT_DIR = _THIS_DIR.parent.parent.resolve()\n_CSRC_DIR = _ROOT_DIR / 'torchaudio' / 'csrc'\n_TP_BASE_DIR = _ROOT_DIR / 'third_party'\n_TP_INSTALL_DIR = _TP_BASE_DIR / 'install'\n\n\ndef _get_build_sox():\n val = os.environ.get('BUILD_SOX', '0')\n trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']\n falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']\n if val in trues:\n return True\n if val not in falses:\n print(\n f'WARNING: Unexpected environment variable value `BUILD_SOX={val}`. 
'\n f'Expected one of {trues + falses}')\n return False\n\n\n_BUILD_SOX = _get_build_sox()\n\n\ndef _get_eca(debug):\n eca = []\n if debug:\n eca += [\"-O0\", \"-g\"]\n else:\n eca += [\"-O3\"]\n return eca\n\n\ndef _get_ela(debug):\n ela = []\n if debug:\n if platform.system() == \"Windows\":\n ela += [\"/DEBUG:FULL\"]\n else:\n ela += [\"-O0\", \"-g\"]\n else:\n ela += [\"-O3\"]\n return ela\n\n\ndef _get_srcs():\n return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]\n\n\ndef _get_include_dirs():\n dirs = [\n str(_ROOT_DIR),\n ]\n if _BUILD_SOX:\n dirs.append(str(_TP_INSTALL_DIR / 'include'))\n return dirs\n\n\ndef _get_extra_objects():\n objs = []\n if _BUILD_SOX:\n # NOTE: The order of the library listed bellow matters.\n #\n # (the most important thing is that dependencies come after a library\n # e.g., sox comes first, flac/vorbis comes before ogg, and\n # vorbisenc/vorbisfile comes before vorbis\n libs = [\n 'libsox.a',\n 'libmad.a',\n 'libFLAC.a',\n 'libmp3lame.a',\n 'libopusfile.a',\n 'libopus.a',\n 'libvorbisenc.a',\n 'libvorbisfile.a',\n 'libvorbis.a',\n 'libogg.a',\n ]\n for lib in libs:\n objs.append(str(_TP_INSTALL_DIR / 'lib' / lib))\n return objs\n\n\ndef _get_libraries():\n return [] if _BUILD_SOX else ['sox']\n\n\ndef _build_third_party():\n build_dir = str(_TP_BASE_DIR / 'build')\n os.makedirs(build_dir, exist_ok=True)\n subprocess.run(\n args=['cmake', '..'],\n cwd=build_dir,\n check=True,\n )\n subprocess.run(\n args=['cmake', '--build', '.'],\n cwd=build_dir,\n check=True,\n )\n\n\n_EXT_NAME = 'torchaudio._torchaudio'\n\n\ndef get_ext_modules(debug=False):\n if platform.system() == 'Windows':\n return None\n return [\n CppExtension(\n _EXT_NAME,\n _get_srcs(),\n libraries=_get_libraries(),\n include_dirs=_get_include_dirs(),\n extra_compile_args=_get_eca(debug),\n extra_objects=_get_extra_objects(),\n extra_link_args=_get_ela(debug),\n ),\n ]\n\n\nclass BuildExtension(TorchBuildExtension):\n def build_extension(self, ext):\n if ext.name == _EXT_NAME and _BUILD_SOX:\n _build_third_party()\n super().build_extension(ext)\n", "path": "build_tools/setup_helpers/extension.py"}]} | 1,590 | 115 |
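The patch above only adds `libopusfile.a` and `libopus.a` to the static objects linked into `torchaudio._torchaudio`. A rough sanity check of the result, assuming a rebuild with `BUILD_SOX=1` and a local `sample.opus` file (both hypothetical here), could look like this:

```py
# Hypothetical smoke test: decode an Opus file with the rebuilt extension.
# Assumes torchaudio was compiled with BUILD_SOX=1 and that sample.opus exists locally.
import torchaudio

waveform, sample_rate = torchaudio.load("sample.opus")
print(waveform.shape, sample_rate)
```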
gh_patches_debug_24556 | rasdani/github-patches | git_diff | scrapy__scrapy-5611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BOM should take precedence over Content-Type header when detecting the encoding
Currently Scrapy uses headers first to detect the encoding. But browsers actually give the BOM a higher priority; this is also in the WHATWG [standard](https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding). It can be checked e.g. by running this server and opening the URL in a browser - UTF-8 is used by the browser, but cp1251 is used by Scrapy:
```py
import codecs
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
class HttpGetHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html; charset=cp1251")
self.end_headers()
self.wfile.write(codecs.BOM_UTF8)
self.wfile.write("<!DOCTYPE html>".encode('utf8'))
self.wfile.write("Привет!".encode('utf8'))
if __name__ == '__main__':
httpd = HTTPServer(('', 8000), HttpGetHandler)
httpd.serve_forever()
```
When opening this page in a browser, it shows "Привет!".
Spider code to check it:
```py
import scrapy
from scrapy.crawler import CrawlerProcess
class MySpider(scrapy.Spider):
name = "tst"
start_urls = ["http://0.0.0.0:8000"]
def parse(self, response):
return {"encoding": response.encoding, "text": response.text}
if __name__ == '__main__':
process = CrawlerProcess()
process.crawl(MySpider)
process.start()
```
Spider outputs
> {'encoding': 'cp1251', 'text': 'п»ї<!DOCTYPE html>РџСЂРёРІРµС‚!'}
See also: https://github.com/scrapy/w3lib/issues/189 - it's a similar issue, but fixing it in w3lib is not enough to make it working in Scrapy.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/http/response/text.py`
Content:
```
1 """
2 This module implements the TextResponse class which adds encoding handling and
3 discovering (through HTTP headers) to base Response class.
4
5 See documentation in docs/topics/request-response.rst
6 """
7
8 import json
9 from contextlib import suppress
10 from typing import Generator, Tuple
11 from urllib.parse import urljoin
12
13 import parsel
14 from w3lib.encoding import (html_body_declared_encoding, html_to_unicode,
15 http_content_type_encoding, resolve_encoding)
16 from w3lib.html import strip_html5_whitespace
17
18 from scrapy.http import Request
19 from scrapy.http.response import Response
20 from scrapy.utils.python import memoizemethod_noargs, to_unicode
21 from scrapy.utils.response import get_base_url
22
23 _NONE = object()
24
25
26 class TextResponse(Response):
27
28 _DEFAULT_ENCODING = 'ascii'
29 _cached_decoded_json = _NONE
30
31 attributes: Tuple[str, ...] = Response.attributes + ("encoding",)
32
33 def __init__(self, *args, **kwargs):
34 self._encoding = kwargs.pop('encoding', None)
35 self._cached_benc = None
36 self._cached_ubody = None
37 self._cached_selector = None
38 super().__init__(*args, **kwargs)
39
40 def _set_url(self, url):
41 if isinstance(url, str):
42 self._url = to_unicode(url, self.encoding)
43 else:
44 super()._set_url(url)
45
46 def _set_body(self, body):
47 self._body = b'' # used by encoding detection
48 if isinstance(body, str):
49 if self._encoding is None:
50 raise TypeError('Cannot convert unicode body - '
51 f'{type(self).__name__} has no encoding')
52 self._body = body.encode(self._encoding)
53 else:
54 super()._set_body(body)
55
56 @property
57 def encoding(self):
58 return self._declared_encoding() or self._body_inferred_encoding()
59
60 def _declared_encoding(self):
61 return (
62 self._encoding
63 or self._headers_encoding()
64 or self._body_declared_encoding()
65 )
66
67 def json(self):
68 """
69 .. versionadded:: 2.2
70
71 Deserialize a JSON document to a Python object.
72 """
73 if self._cached_decoded_json is _NONE:
74 self._cached_decoded_json = json.loads(self.text)
75 return self._cached_decoded_json
76
77 @property
78 def text(self):
79 """ Body as unicode """
80 # access self.encoding before _cached_ubody to make sure
81 # _body_inferred_encoding is called
82 benc = self.encoding
83 if self._cached_ubody is None:
84 charset = f'charset={benc}'
85 self._cached_ubody = html_to_unicode(charset, self.body)[1]
86 return self._cached_ubody
87
88 def urljoin(self, url):
89 """Join this Response's url with a possible relative url to form an
90 absolute interpretation of the latter."""
91 return urljoin(get_base_url(self), url)
92
93 @memoizemethod_noargs
94 def _headers_encoding(self):
95 content_type = self.headers.get(b'Content-Type', b'')
96 return http_content_type_encoding(to_unicode(content_type))
97
98 def _body_inferred_encoding(self):
99 if self._cached_benc is None:
100 content_type = to_unicode(self.headers.get(b'Content-Type', b''))
101 benc, ubody = html_to_unicode(content_type, self.body,
102 auto_detect_fun=self._auto_detect_fun,
103 default_encoding=self._DEFAULT_ENCODING)
104 self._cached_benc = benc
105 self._cached_ubody = ubody
106 return self._cached_benc
107
108 def _auto_detect_fun(self, text):
109 for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
110 try:
111 text.decode(enc)
112 except UnicodeError:
113 continue
114 return resolve_encoding(enc)
115
116 @memoizemethod_noargs
117 def _body_declared_encoding(self):
118 return html_body_declared_encoding(self.body)
119
120 @property
121 def selector(self):
122 from scrapy.selector import Selector
123 if self._cached_selector is None:
124 self._cached_selector = Selector(self)
125 return self._cached_selector
126
127 def xpath(self, query, **kwargs):
128 return self.selector.xpath(query, **kwargs)
129
130 def css(self, query):
131 return self.selector.css(query)
132
133 def follow(self, url, callback=None, method='GET', headers=None, body=None,
134 cookies=None, meta=None, encoding=None, priority=0,
135 dont_filter=False, errback=None, cb_kwargs=None, flags=None):
136 # type: (...) -> Request
137 """
138 Return a :class:`~.Request` instance to follow a link ``url``.
139 It accepts the same arguments as ``Request.__init__`` method,
140 but ``url`` can be not only an absolute URL, but also
141
142 * a relative URL
143 * a :class:`~scrapy.link.Link` object, e.g. the result of
144 :ref:`topics-link-extractors`
145 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
146 ``response.css('a.my_link')[0]``
147 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
148 ``response.css('a::attr(href)')[0]`` or
149 ``response.xpath('//img/@src')[0]``
150
151 See :ref:`response-follow-example` for usage examples.
152 """
153 if isinstance(url, parsel.Selector):
154 url = _url_from_selector(url)
155 elif isinstance(url, parsel.SelectorList):
156 raise ValueError("SelectorList is not supported")
157 encoding = self.encoding if encoding is None else encoding
158 return super().follow(
159 url=url,
160 callback=callback,
161 method=method,
162 headers=headers,
163 body=body,
164 cookies=cookies,
165 meta=meta,
166 encoding=encoding,
167 priority=priority,
168 dont_filter=dont_filter,
169 errback=errback,
170 cb_kwargs=cb_kwargs,
171 flags=flags,
172 )
173
174 def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,
175 cookies=None, meta=None, encoding=None, priority=0,
176 dont_filter=False, errback=None, cb_kwargs=None, flags=None,
177 css=None, xpath=None):
178 # type: (...) -> Generator[Request, None, None]
179 """
180 A generator that produces :class:`~.Request` instances to follow all
181 links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s
182 ``__init__`` method, except that each ``urls`` element does not need to be
183 an absolute URL, it can be any of the following:
184
185 * a relative URL
186 * a :class:`~scrapy.link.Link` object, e.g. the result of
187 :ref:`topics-link-extractors`
188 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
189 ``response.css('a.my_link')[0]``
190 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
191 ``response.css('a::attr(href)')[0]`` or
192 ``response.xpath('//img/@src')[0]``
193
194 In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction
195 within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).
196
197 Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or
198 using the ``css`` or ``xpath`` parameters, this method will not produce requests for
199 selectors from which links cannot be obtained (for instance, anchor tags without an
200 ``href`` attribute)
201 """
202 arguments = [x for x in (urls, css, xpath) if x is not None]
203 if len(arguments) != 1:
204 raise ValueError(
205 "Please supply exactly one of the following arguments: urls, css, xpath"
206 )
207 if not urls:
208 if css:
209 urls = self.css(css)
210 if xpath:
211 urls = self.xpath(xpath)
212 if isinstance(urls, parsel.SelectorList):
213 selectors = urls
214 urls = []
215 for sel in selectors:
216 with suppress(_InvalidSelector):
217 urls.append(_url_from_selector(sel))
218 return super().follow_all(
219 urls=urls,
220 callback=callback,
221 method=method,
222 headers=headers,
223 body=body,
224 cookies=cookies,
225 meta=meta,
226 encoding=encoding,
227 priority=priority,
228 dont_filter=dont_filter,
229 errback=errback,
230 cb_kwargs=cb_kwargs,
231 flags=flags,
232 )
233
234
235 class _InvalidSelector(ValueError):
236 """
237 Raised when a URL cannot be obtained from a Selector
238 """
239
240
241 def _url_from_selector(sel):
242 # type: (parsel.Selector) -> str
243 if isinstance(sel.root, str):
244 # e.g. ::attr(href) result
245 return strip_html5_whitespace(sel.root)
246 if not hasattr(sel.root, 'tag'):
247 raise _InvalidSelector(f"Unsupported selector: {sel}")
248 if sel.root.tag not in ('a', 'link'):
249 raise _InvalidSelector("Only <a> and <link> elements are supported; "
250 f"got <{sel.root.tag}>")
251 href = sel.root.get('href')
252 if href is None:
253 raise _InvalidSelector(f"<{sel.root.tag}> element has no href attribute: {sel}")
254 return strip_html5_whitespace(href)
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py
--- a/scrapy/http/response/text.py
+++ b/scrapy/http/response/text.py
@@ -11,8 +11,13 @@
from urllib.parse import urljoin
import parsel
-from w3lib.encoding import (html_body_declared_encoding, html_to_unicode,
- http_content_type_encoding, resolve_encoding)
+from w3lib.encoding import (
+ html_body_declared_encoding,
+ html_to_unicode,
+ http_content_type_encoding,
+ resolve_encoding,
+ read_bom,
+)
from w3lib.html import strip_html5_whitespace
from scrapy.http import Request
@@ -60,6 +65,7 @@
def _declared_encoding(self):
return (
self._encoding
+ or self._bom_encoding()
or self._headers_encoding()
or self._body_declared_encoding()
)
@@ -117,6 +123,10 @@
def _body_declared_encoding(self):
return html_body_declared_encoding(self.body)
+ @memoizemethod_noargs
+ def _bom_encoding(self):
+ return read_bom(self.body)[0]
+
@property
def selector(self):
from scrapy.selector import Selector
| {"golden_diff": "diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py\n--- a/scrapy/http/response/text.py\n+++ b/scrapy/http/response/text.py\n@@ -11,8 +11,13 @@\n from urllib.parse import urljoin\n \n import parsel\n-from w3lib.encoding import (html_body_declared_encoding, html_to_unicode,\n- http_content_type_encoding, resolve_encoding)\n+from w3lib.encoding import (\n+ html_body_declared_encoding,\n+ html_to_unicode,\n+ http_content_type_encoding,\n+ resolve_encoding,\n+ read_bom,\n+)\n from w3lib.html import strip_html5_whitespace\n \n from scrapy.http import Request\n@@ -60,6 +65,7 @@\n def _declared_encoding(self):\n return (\n self._encoding\n+ or self._bom_encoding()\n or self._headers_encoding()\n or self._body_declared_encoding()\n )\n@@ -117,6 +123,10 @@\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n \n+ @memoizemethod_noargs\n+ def _bom_encoding(self):\n+ return read_bom(self.body)[0]\n+\n @property\n def selector(self):\n from scrapy.selector import Selector\n", "issue": "BOM should take precedence over Content-Type header when detecting the encoding\nCurrently Scrapy uses headers first to detect the encoding. But browsers actually put a higher priority for BOM; this is also in WHATWG [standard](https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding). It can be checked e.g. by running this server, and opening URL in a browser - UTF-8 is used by browser, but cp1251 is used by Scrapy:\r\n\r\n```py\r\nimport codecs\r\nfrom http.server import BaseHTTPRequestHandler\r\nfrom http.server import HTTPServer\r\n\r\n\r\nclass HttpGetHandler(BaseHTTPRequestHandler):\r\n def do_GET(self):\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/html; charset=cp1251\")\r\n self.end_headers()\r\n self.wfile.write(codecs.BOM_UTF8)\r\n self.wfile.write(\"<!DOCTYPE html>\".encode('utf8'))\r\n self.wfile.write(\"\u041f\u0440\u0438\u0432\u0435\u0442!\".encode('utf8'))\r\n\r\n\r\nif __name__ == '__main__':\r\n httpd = HTTPServer(('', 8000), HttpGetHandler)\r\n httpd.serve_forever()\r\n```\r\n\r\nWhen opening this page in a browser, it shows \"\u041f\u0440\u0438\u0432\u0435\u0442!\".\r\n\r\nSpider code to check it:\r\n\r\n```py\r\nimport scrapy\r\nfrom scrapy.crawler import CrawlerProcess\r\n\r\n\r\nclass MySpider(scrapy.Spider):\r\n name = \"tst\"\r\n\r\n start_urls = [\"http://0.0.0.0:8000\"]\r\n\r\n def parse(self, response):\r\n return {\"encoding\": response.encoding, \"text\": response.text}\r\n\r\n\r\nif __name__ == '__main__':\r\n process = CrawlerProcess()\r\n process.crawl(MySpider)\r\n process.start()\r\n```\r\n\r\nSpider outputs\r\n\r\n> {'encoding': 'cp1251', 'text': '\u043f\u00bb\u0457<!DOCTYPE html>\u0420\u045f\u0421\u0402\u0420\u0451\u0420\u0406\u0420\u00b5\u0421\u201a!'}\r\n\r\nSee also: https://github.com/scrapy/w3lib/issues/189 - it's a similar issue, but fixing it in w3lib is not enough to make it working in Scrapy.\n", "before_files": [{"content": "\"\"\"\nThis module implements the TextResponse class which adds encoding handling and\ndiscovering (through HTTP headers) to base Response class.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\n\nimport json\nfrom contextlib import suppress\nfrom typing import Generator, Tuple\nfrom urllib.parse import urljoin\n\nimport parsel\nfrom w3lib.encoding import (html_body_declared_encoding, html_to_unicode,\n http_content_type_encoding, resolve_encoding)\nfrom w3lib.html import strip_html5_whitespace\n\nfrom 
scrapy.http import Request\nfrom scrapy.http.response import Response\nfrom scrapy.utils.python import memoizemethod_noargs, to_unicode\nfrom scrapy.utils.response import get_base_url\n\n_NONE = object()\n\n\nclass TextResponse(Response):\n\n _DEFAULT_ENCODING = 'ascii'\n _cached_decoded_json = _NONE\n\n attributes: Tuple[str, ...] = Response.attributes + (\"encoding\",)\n\n def __init__(self, *args, **kwargs):\n self._encoding = kwargs.pop('encoding', None)\n self._cached_benc = None\n self._cached_ubody = None\n self._cached_selector = None\n super().__init__(*args, **kwargs)\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = to_unicode(url, self.encoding)\n else:\n super()._set_url(url)\n\n def _set_body(self, body):\n self._body = b'' # used by encoding detection\n if isinstance(body, str):\n if self._encoding is None:\n raise TypeError('Cannot convert unicode body - '\n f'{type(self).__name__} has no encoding')\n self._body = body.encode(self._encoding)\n else:\n super()._set_body(body)\n\n @property\n def encoding(self):\n return self._declared_encoding() or self._body_inferred_encoding()\n\n def _declared_encoding(self):\n return (\n self._encoding\n or self._headers_encoding()\n or self._body_declared_encoding()\n )\n\n def json(self):\n \"\"\"\n .. versionadded:: 2.2\n\n Deserialize a JSON document to a Python object.\n \"\"\"\n if self._cached_decoded_json is _NONE:\n self._cached_decoded_json = json.loads(self.text)\n return self._cached_decoded_json\n\n @property\n def text(self):\n \"\"\" Body as unicode \"\"\"\n # access self.encoding before _cached_ubody to make sure\n # _body_inferred_encoding is called\n benc = self.encoding\n if self._cached_ubody is None:\n charset = f'charset={benc}'\n self._cached_ubody = html_to_unicode(charset, self.body)[1]\n return self._cached_ubody\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(get_base_url(self), url)\n\n @memoizemethod_noargs\n def _headers_encoding(self):\n content_type = self.headers.get(b'Content-Type', b'')\n return http_content_type_encoding(to_unicode(content_type))\n\n def _body_inferred_encoding(self):\n if self._cached_benc is None:\n content_type = to_unicode(self.headers.get(b'Content-Type', b''))\n benc, ubody = html_to_unicode(content_type, self.body,\n auto_detect_fun=self._auto_detect_fun,\n default_encoding=self._DEFAULT_ENCODING)\n self._cached_benc = benc\n self._cached_ubody = ubody\n return self._cached_benc\n\n def _auto_detect_fun(self, text):\n for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):\n try:\n text.decode(enc)\n except UnicodeError:\n continue\n return resolve_encoding(enc)\n\n @memoizemethod_noargs\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n\n @property\n def selector(self):\n from scrapy.selector import Selector\n if self._cached_selector is None:\n self._cached_selector = Selector(self)\n return self._cached_selector\n\n def xpath(self, query, **kwargs):\n return self.selector.xpath(query, **kwargs)\n\n def css(self, query):\n return self.selector.css(query)\n\n def follow(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None):\n # type: (...) 
-> Request\n \"\"\"\n Return a :class:`~.Request` instance to follow a link ``url``.\n It accepts the same arguments as ``Request.__init__`` method,\n but ``url`` can be not only an absolute URL, but also\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n See :ref:`response-follow-example` for usage examples.\n \"\"\"\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError(\"SelectorList is not supported\")\n encoding = self.encoding if encoding is None else encoding\n return super().follow(\n url=url,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None,\n css=None, xpath=None):\n # type: (...) -> Generator[Request, None, None]\n \"\"\"\n A generator that produces :class:`~.Request` instances to follow all\n links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s\n ``__init__`` method, except that each ``urls`` element does not need to be\n an absolute URL, it can be any of the following:\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. 
the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction\n within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).\n\n Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or\n using the ``css`` or ``xpath`` parameters, this method will not produce requests for\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n arguments = [x for x in (urls, css, xpath) if x is not None]\n if len(arguments) != 1:\n raise ValueError(\n \"Please supply exactly one of the following arguments: urls, css, xpath\"\n )\n if not urls:\n if css:\n urls = self.css(css)\n if xpath:\n urls = self.xpath(xpath)\n if isinstance(urls, parsel.SelectorList):\n selectors = urls\n urls = []\n for sel in selectors:\n with suppress(_InvalidSelector):\n urls.append(_url_from_selector(sel))\n return super().follow_all(\n urls=urls,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n\nclass _InvalidSelector(ValueError):\n \"\"\"\n Raised when a URL cannot be obtained from a Selector\n \"\"\"\n\n\ndef _url_from_selector(sel):\n # type: (parsel.Selector) -> str\n if isinstance(sel.root, str):\n # e.g. ::attr(href) result\n return strip_html5_whitespace(sel.root)\n if not hasattr(sel.root, 'tag'):\n raise _InvalidSelector(f\"Unsupported selector: {sel}\")\n if sel.root.tag not in ('a', 'link'):\n raise _InvalidSelector(\"Only <a> and <link> elements are supported; \"\n f\"got <{sel.root.tag}>\")\n href = sel.root.get('href')\n if href is None:\n raise _InvalidSelector(f\"<{sel.root.tag}> element has no href attribute: {sel}\")\n return strip_html5_whitespace(href)\n", "path": "scrapy/http/response/text.py"}], "after_files": [{"content": "\"\"\"\nThis module implements the TextResponse class which adds encoding handling and\ndiscovering (through HTTP headers) to base Response class.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\n\nimport json\nfrom contextlib import suppress\nfrom typing import Generator, Tuple\nfrom urllib.parse import urljoin\n\nimport parsel\nfrom w3lib.encoding import (\n html_body_declared_encoding,\n html_to_unicode,\n http_content_type_encoding,\n resolve_encoding,\n read_bom,\n)\nfrom w3lib.html import strip_html5_whitespace\n\nfrom scrapy.http import Request\nfrom scrapy.http.response import Response\nfrom scrapy.utils.python import memoizemethod_noargs, to_unicode\nfrom scrapy.utils.response import get_base_url\n\n_NONE = object()\n\n\nclass TextResponse(Response):\n\n _DEFAULT_ENCODING = 'ascii'\n _cached_decoded_json = _NONE\n\n attributes: Tuple[str, ...] 
= Response.attributes + (\"encoding\",)\n\n def __init__(self, *args, **kwargs):\n self._encoding = kwargs.pop('encoding', None)\n self._cached_benc = None\n self._cached_ubody = None\n self._cached_selector = None\n super().__init__(*args, **kwargs)\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = to_unicode(url, self.encoding)\n else:\n super()._set_url(url)\n\n def _set_body(self, body):\n self._body = b'' # used by encoding detection\n if isinstance(body, str):\n if self._encoding is None:\n raise TypeError('Cannot convert unicode body - '\n f'{type(self).__name__} has no encoding')\n self._body = body.encode(self._encoding)\n else:\n super()._set_body(body)\n\n @property\n def encoding(self):\n return self._declared_encoding() or self._body_inferred_encoding()\n\n def _declared_encoding(self):\n return (\n self._encoding\n or self._bom_encoding()\n or self._headers_encoding()\n or self._body_declared_encoding()\n )\n\n def json(self):\n \"\"\"\n .. versionadded:: 2.2\n\n Deserialize a JSON document to a Python object.\n \"\"\"\n if self._cached_decoded_json is _NONE:\n self._cached_decoded_json = json.loads(self.text)\n return self._cached_decoded_json\n\n @property\n def text(self):\n \"\"\" Body as unicode \"\"\"\n # access self.encoding before _cached_ubody to make sure\n # _body_inferred_encoding is called\n benc = self.encoding\n if self._cached_ubody is None:\n charset = f'charset={benc}'\n self._cached_ubody = html_to_unicode(charset, self.body)[1]\n return self._cached_ubody\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(get_base_url(self), url)\n\n @memoizemethod_noargs\n def _headers_encoding(self):\n content_type = self.headers.get(b'Content-Type', b'')\n return http_content_type_encoding(to_unicode(content_type))\n\n def _body_inferred_encoding(self):\n if self._cached_benc is None:\n content_type = to_unicode(self.headers.get(b'Content-Type', b''))\n benc, ubody = html_to_unicode(content_type, self.body,\n auto_detect_fun=self._auto_detect_fun,\n default_encoding=self._DEFAULT_ENCODING)\n self._cached_benc = benc\n self._cached_ubody = ubody\n return self._cached_benc\n\n def _auto_detect_fun(self, text):\n for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):\n try:\n text.decode(enc)\n except UnicodeError:\n continue\n return resolve_encoding(enc)\n\n @memoizemethod_noargs\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n\n @memoizemethod_noargs\n def _bom_encoding(self):\n return read_bom(self.body)[0]\n\n @property\n def selector(self):\n from scrapy.selector import Selector\n if self._cached_selector is None:\n self._cached_selector = Selector(self)\n return self._cached_selector\n\n def xpath(self, query, **kwargs):\n return self.selector.xpath(query, **kwargs)\n\n def css(self, query):\n return self.selector.css(query)\n\n def follow(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None):\n # type: (...) -> Request\n \"\"\"\n Return a :class:`~.Request` instance to follow a link ``url``.\n It accepts the same arguments as ``Request.__init__`` method,\n but ``url`` can be not only an absolute URL, but also\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. 
the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n See :ref:`response-follow-example` for usage examples.\n \"\"\"\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError(\"SelectorList is not supported\")\n encoding = self.encoding if encoding is None else encoding\n return super().follow(\n url=url,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None,\n css=None, xpath=None):\n # type: (...) -> Generator[Request, None, None]\n \"\"\"\n A generator that produces :class:`~.Request` instances to follow all\n links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s\n ``__init__`` method, except that each ``urls`` element does not need to be\n an absolute URL, it can be any of the following:\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction\n within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).\n\n Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or\n using the ``css`` or ``xpath`` parameters, this method will not produce requests for\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n arguments = [x for x in (urls, css, xpath) if x is not None]\n if len(arguments) != 1:\n raise ValueError(\n \"Please supply exactly one of the following arguments: urls, css, xpath\"\n )\n if not urls:\n if css:\n urls = self.css(css)\n if xpath:\n urls = self.xpath(xpath)\n if isinstance(urls, parsel.SelectorList):\n selectors = urls\n urls = []\n for sel in selectors:\n with suppress(_InvalidSelector):\n urls.append(_url_from_selector(sel))\n return super().follow_all(\n urls=urls,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n\nclass _InvalidSelector(ValueError):\n \"\"\"\n Raised when a URL cannot be obtained from a Selector\n \"\"\"\n\n\ndef _url_from_selector(sel):\n # type: (parsel.Selector) -> str\n if isinstance(sel.root, str):\n # e.g. 
::attr(href) result\n return strip_html5_whitespace(sel.root)\n if not hasattr(sel.root, 'tag'):\n raise _InvalidSelector(f\"Unsupported selector: {sel}\")\n if sel.root.tag not in ('a', 'link'):\n raise _InvalidSelector(\"Only <a> and <link> elements are supported; \"\n f\"got <{sel.root.tag}>\")\n href = sel.root.get('href')\n if href is None:\n raise _InvalidSelector(f\"<{sel.root.tag}> element has no href attribute: {sel}\")\n return strip_html5_whitespace(href)\n", "path": "scrapy/http/response/text.py"}]} | 3,473 | 283 |
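The `_bom_encoding()` helper added above relies on `w3lib.encoding.read_bom`, which returns an `(encoding, bom_bytes)` pair and `(None, None)` when no BOM is present. A minimal sketch of the resulting precedence (BOM first, then the Content-Type header), using made-up body and header values, could look like this:

```py
import codecs

from w3lib.encoding import http_content_type_encoding, read_bom

# Illustrative values only: a UTF-8 BOM plus UTF-8 text, but a cp1251 header.
body = codecs.BOM_UTF8 + "<!DOCTYPE html>Привет!".encode("utf8")
content_type = "text/html; charset=cp1251"

# The BOM wins over the header, matching the WHATWG rules cited in the issue.
encoding = read_bom(body)[0] or http_content_type_encoding(content_type)
print(encoding)  # utf-8
```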
gh_patches_debug_39666 | rasdani/github-patches | git_diff | SeldonIO__MLServer-288 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consider using `tobytes()` instead of `tolist()` in numpy codec for performance improvement
Our use-case is to make multiple requests to MLServer, and all of them will have 640x640 image data in numpy format as input.
Given that `tobytes()` is way faster than `tolist()`, does it make sense to support it by default in the numpy codec here: https://github.com/SeldonIO/MLServer/blob/61d9f078d1f886d14083551f33db6a3146f12745/mlserver/codecs/numpy.py#L54
----
Performance comparison of the two methods:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/codecs/numpy.py`
Content:
```
1 import numpy as np
2
3 from ..types import RequestInput, ResponseOutput
4
5 from .base import InputCodec, register_input_codec, register_request_codec
6 from .utils import FirstInputRequestCodec
7
8 _DatatypeToNumpy = {
9 "BOOL": "bool",
10 "UINT8": "uint8",
11 "UINT16": "uint16",
12 "UINT32": "uint32",
13 "UINT64": "uint64",
14 "INT8": "int8",
15 "INT16": "int16",
16 "INT32": "int32",
17 "INT64": "int64",
18 "FP16": "float16",
19 "FP32": "float32",
20 "FP64": "float64",
21 "BYTES": "byte",
22 }
23
24 _NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}
25
26 # NOTE: numpy has more types than v2 protocol
27 _NumpyToDatatype["object"] = "BYTES"
28
29
30 def _to_dtype(datatype: str) -> "np.dtype":
31 dtype = _DatatypeToNumpy[datatype]
32 return np.dtype(dtype)
33
34
35 def _to_datatype(dtype: np.dtype) -> str:
36 as_str = str(dtype)
37 datatype = _NumpyToDatatype[as_str]
38
39 return datatype
40
41
42 @register_input_codec
43 class NumpyCodec(InputCodec):
44 """
45 Encodes a tensor as a numpy array.
46 """
47
48 ContentType = "np"
49
50 @classmethod
51 def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:
52 return ResponseOutput(
53 name=name,
54 datatype=_to_datatype(payload.dtype),
55 shape=list(payload.shape),
56 data=payload.flatten().tolist(),
57 )
58
59 @classmethod
60 def decode(cls, request_input: RequestInput) -> np.ndarray:
61 dtype = _to_dtype(request_input.datatype)
62 data = getattr(request_input.data, "__root__", request_input.data)
63
64 model_data = np.array(data, dtype=dtype)
65
66 # TODO: Check if reshape not valid
67 return model_data.reshape(request_input.shape)
68
69
70 @register_request_codec
71 class NumpyRequestCodec(FirstInputRequestCodec):
72 InputCodec = NumpyCodec
73 ContentType = NumpyCodec.ContentType
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py
--- a/mlserver/codecs/numpy.py
+++ b/mlserver/codecs/numpy.py
@@ -1,5 +1,7 @@
import numpy as np
+from typing import Any
+
from ..types import RequestInput, ResponseOutput
from .base import InputCodec, register_input_codec, register_request_codec
@@ -18,27 +20,59 @@
"FP16": "float16",
"FP32": "float32",
"FP64": "float64",
- "BYTES": "byte",
+ "BYTES": "bytes",
}
_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}
# NOTE: numpy has more types than v2 protocol
_NumpyToDatatype["object"] = "BYTES"
+_NumpyToDatatype["S"] = "BYTES"
+
+def _to_dtype(request_input: RequestInput) -> "np.dtype":
+ dtype = _DatatypeToNumpy[request_input.datatype]
+
+ if request_input.datatype == "BYTES":
+ # bytes have variable size, so need to specify as part of type
+ # TODO: Make elem size variable (and not just the last dimension)
+ elem_size = request_input.shape[-1]
+ return np.dtype((dtype, elem_size))
-def _to_dtype(datatype: str) -> "np.dtype":
- dtype = _DatatypeToNumpy[datatype]
return np.dtype(dtype)
def _to_datatype(dtype: np.dtype) -> str:
as_str = str(dtype)
+
+ if as_str not in _NumpyToDatatype:
+ # If not present, try with kind
+ as_str = getattr(dtype, "kind")
+
datatype = _NumpyToDatatype[as_str]
return datatype
+def _to_ndarray(request_input: RequestInput) -> np.ndarray:
+ data = getattr(request_input.data, "__root__", request_input.data)
+ dtype = _to_dtype(request_input)
+
+ if request_input.datatype == "BYTES":
+ return np.frombuffer(data, dtype)
+
+ return np.array(data, dtype)
+
+
+def _encode_data(data: np.ndarray, datatype: str) -> Any:
+ if datatype == "BYTES":
+ # tobytes is way faster than tolist, although it's harder to serialise
+ # and only makes sense for actual bytes inputs (#253)
+ return data.tobytes()
+
+ return data.flatten().tolist()
+
+
@register_input_codec
class NumpyCodec(InputCodec):
"""
@@ -49,19 +83,18 @@
@classmethod
def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:
+ datatype = _to_datatype(payload.dtype)
+
return ResponseOutput(
name=name,
- datatype=_to_datatype(payload.dtype),
+ datatype=datatype,
shape=list(payload.shape),
- data=payload.flatten().tolist(),
+ data=_encode_data(payload, datatype),
)
@classmethod
def decode(cls, request_input: RequestInput) -> np.ndarray:
- dtype = _to_dtype(request_input.datatype)
- data = getattr(request_input.data, "__root__", request_input.data)
-
- model_data = np.array(data, dtype=dtype)
+ model_data = _to_ndarray(request_input)
# TODO: Check if reshape not valid
return model_data.reshape(request_input.shape)
| {"golden_diff": "diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py\n--- a/mlserver/codecs/numpy.py\n+++ b/mlserver/codecs/numpy.py\n@@ -1,5 +1,7 @@\n import numpy as np\n \n+from typing import Any\n+\n from ..types import RequestInput, ResponseOutput\n \n from .base import InputCodec, register_input_codec, register_request_codec\n@@ -18,27 +20,59 @@\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n- \"BYTES\": \"byte\",\n+ \"BYTES\": \"bytes\",\n }\n \n _NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n \n # NOTE: numpy has more types than v2 protocol\n _NumpyToDatatype[\"object\"] = \"BYTES\"\n+_NumpyToDatatype[\"S\"] = \"BYTES\"\n+\n \n+def _to_dtype(request_input: RequestInput) -> \"np.dtype\":\n+ dtype = _DatatypeToNumpy[request_input.datatype]\n+\n+ if request_input.datatype == \"BYTES\":\n+ # bytes have variable size, so need to specify as part of type\n+ # TODO: Make elem size variable (and not just the last dimension)\n+ elem_size = request_input.shape[-1]\n+ return np.dtype((dtype, elem_size))\n \n-def _to_dtype(datatype: str) -> \"np.dtype\":\n- dtype = _DatatypeToNumpy[datatype]\n return np.dtype(dtype)\n \n \n def _to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n+\n+ if as_str not in _NumpyToDatatype:\n+ # If not present, try with kind\n+ as_str = getattr(dtype, \"kind\")\n+\n datatype = _NumpyToDatatype[as_str]\n \n return datatype\n \n \n+def _to_ndarray(request_input: RequestInput) -> np.ndarray:\n+ data = getattr(request_input.data, \"__root__\", request_input.data)\n+ dtype = _to_dtype(request_input)\n+\n+ if request_input.datatype == \"BYTES\":\n+ return np.frombuffer(data, dtype)\n+\n+ return np.array(data, dtype)\n+\n+\n+def _encode_data(data: np.ndarray, datatype: str) -> Any:\n+ if datatype == \"BYTES\":\n+ # tobytes is way faster than tolist, although it's harder to serialise\n+ # and only makes sense for actual bytes inputs (#253)\n+ return data.tobytes()\n+\n+ return data.flatten().tolist()\n+\n+\n @register_input_codec\n class NumpyCodec(InputCodec):\n \"\"\"\n@@ -49,19 +83,18 @@\n \n @classmethod\n def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:\n+ datatype = _to_datatype(payload.dtype)\n+\n return ResponseOutput(\n name=name,\n- datatype=_to_datatype(payload.dtype),\n+ datatype=datatype,\n shape=list(payload.shape),\n- data=payload.flatten().tolist(),\n+ data=_encode_data(payload, datatype),\n )\n \n @classmethod\n def decode(cls, request_input: RequestInput) -> np.ndarray:\n- dtype = _to_dtype(request_input.datatype)\n- data = getattr(request_input.data, \"__root__\", request_input.data)\n-\n- model_data = np.array(data, dtype=dtype)\n+ model_data = _to_ndarray(request_input)\n \n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n", "issue": "Consider using `tobytes()` instead of `tolist()` in numpy codec for performance improvement\nOur use-case is to make multiple requests to MLServer and all of them will have a 640x640 image data in numpy format as input.\r\n\r\nGiven that `tobytes()` is way faster than `tolist()` does it make sense to support it by default in the numpy codec here: https://github.com/SeldonIO/MLServer/blob/61d9f078d1f886d14083551f33db6a3146f12745/mlserver/codecs/numpy.py#L54\r\n\r\n----\r\n\r\nPerformance comparison of the two methods:\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom ..types import RequestInput, ResponseOutput\n\nfrom .base import InputCodec, register_input_codec, 
register_request_codec\nfrom .utils import FirstInputRequestCodec\n\n_DatatypeToNumpy = {\n \"BOOL\": \"bool\",\n \"UINT8\": \"uint8\",\n \"UINT16\": \"uint16\",\n \"UINT32\": \"uint32\",\n \"UINT64\": \"uint64\",\n \"INT8\": \"int8\",\n \"INT16\": \"int16\",\n \"INT32\": \"int32\",\n \"INT64\": \"int64\",\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n \"BYTES\": \"byte\",\n}\n\n_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n\n# NOTE: numpy has more types than v2 protocol\n_NumpyToDatatype[\"object\"] = \"BYTES\"\n\n\ndef _to_dtype(datatype: str) -> \"np.dtype\":\n dtype = _DatatypeToNumpy[datatype]\n return np.dtype(dtype)\n\n\ndef _to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n datatype = _NumpyToDatatype[as_str]\n\n return datatype\n\n\n@register_input_codec\nclass NumpyCodec(InputCodec):\n \"\"\"\n Encodes a tensor as a numpy array.\n \"\"\"\n\n ContentType = \"np\"\n\n @classmethod\n def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:\n return ResponseOutput(\n name=name,\n datatype=_to_datatype(payload.dtype),\n shape=list(payload.shape),\n data=payload.flatten().tolist(),\n )\n\n @classmethod\n def decode(cls, request_input: RequestInput) -> np.ndarray:\n dtype = _to_dtype(request_input.datatype)\n data = getattr(request_input.data, \"__root__\", request_input.data)\n\n model_data = np.array(data, dtype=dtype)\n\n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n\n\n@register_request_codec\nclass NumpyRequestCodec(FirstInputRequestCodec):\n InputCodec = NumpyCodec\n ContentType = NumpyCodec.ContentType\n", "path": "mlserver/codecs/numpy.py"}], "after_files": [{"content": "import numpy as np\n\nfrom typing import Any\n\nfrom ..types import RequestInput, ResponseOutput\n\nfrom .base import InputCodec, register_input_codec, register_request_codec\nfrom .utils import FirstInputRequestCodec\n\n_DatatypeToNumpy = {\n \"BOOL\": \"bool\",\n \"UINT8\": \"uint8\",\n \"UINT16\": \"uint16\",\n \"UINT32\": \"uint32\",\n \"UINT64\": \"uint64\",\n \"INT8\": \"int8\",\n \"INT16\": \"int16\",\n \"INT32\": \"int32\",\n \"INT64\": \"int64\",\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n \"BYTES\": \"bytes\",\n}\n\n_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n\n# NOTE: numpy has more types than v2 protocol\n_NumpyToDatatype[\"object\"] = \"BYTES\"\n_NumpyToDatatype[\"S\"] = \"BYTES\"\n\n\ndef _to_dtype(request_input: RequestInput) -> \"np.dtype\":\n dtype = _DatatypeToNumpy[request_input.datatype]\n\n if request_input.datatype == \"BYTES\":\n # bytes have variable size, so need to specify as part of type\n # TODO: Make elem size variable (and not just the last dimension)\n elem_size = request_input.shape[-1]\n return np.dtype((dtype, elem_size))\n\n return np.dtype(dtype)\n\n\ndef _to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n\n if as_str not in _NumpyToDatatype:\n # If not present, try with kind\n as_str = getattr(dtype, \"kind\")\n\n datatype = _NumpyToDatatype[as_str]\n\n return datatype\n\n\ndef _to_ndarray(request_input: RequestInput) -> np.ndarray:\n data = getattr(request_input.data, \"__root__\", request_input.data)\n dtype = _to_dtype(request_input)\n\n if request_input.datatype == \"BYTES\":\n return np.frombuffer(data, dtype)\n\n return np.array(data, dtype)\n\n\ndef _encode_data(data: np.ndarray, datatype: str) -> Any:\n if datatype == \"BYTES\":\n # tobytes is way faster than tolist, although 
it's harder to serialise\n # and only makes sense for actual bytes inputs (#253)\n return data.tobytes()\n\n return data.flatten().tolist()\n\n\n@register_input_codec\nclass NumpyCodec(InputCodec):\n \"\"\"\n Encodes a tensor as a numpy array.\n \"\"\"\n\n ContentType = \"np\"\n\n @classmethod\n def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:\n datatype = _to_datatype(payload.dtype)\n\n return ResponseOutput(\n name=name,\n datatype=datatype,\n shape=list(payload.shape),\n data=_encode_data(payload, datatype),\n )\n\n @classmethod\n def decode(cls, request_input: RequestInput) -> np.ndarray:\n model_data = _to_ndarray(request_input)\n\n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n\n\n@register_request_codec\nclass NumpyRequestCodec(FirstInputRequestCodec):\n InputCodec = NumpyCodec\n ContentType = NumpyCodec.ContentType\n", "path": "mlserver/codecs/numpy.py"}]} | 1,152 | 796 |
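The `BYTES`-only switch to `tobytes()` above avoids building one Python object per array element. A rough micro-benchmark on a 640x640 `uint8` array (timings are machine-dependent) can reproduce the gap shown in the issue's screenshot:

```py
import timeit

import numpy as np

image = np.random.randint(0, 255, size=(640, 640), dtype=np.uint8)

# tolist() materialises ~409k Python ints; tobytes() is a single contiguous copy.
print("tolist :", timeit.timeit(image.tolist, number=100))
print("tobytes:", timeit.timeit(image.tobytes, number=100))
```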
gh_patches_debug_12399 | rasdani/github-patches | git_diff | encode__starlette-318 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Form field names do not escape percent encoding
I switched one microservice from Flask to Starlette and got an unexpected form parsing problem.
This code will return %20 in places where spaces were supposed to be:
```
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.requests import Request as StarletteRequest
SERVER = Starlette()
@SERVER.route("/send", methods=["POST"])
async def send_endpoint(request: StarletteRequest) -> PlainTextResponse:
form = await request.form()
if "number" in form and "name" in form:
number = form["number"]
name = form["name"]
return PlainTextResponse("Number: " + number + " ; Name: " + name)
```
Maybe there are other issues with form parsing (x-www-form-urlencoded).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/formparsers.py`
Content:
```
1 import asyncio
2 import io
3 import tempfile
4 import typing
5 from enum import Enum
6 from urllib.parse import unquote_plus
7
8 from starlette.concurrency import run_in_threadpool
9 from starlette.datastructures import Headers
10
11 try:
12 from multipart.multipart import parse_options_header
13 import multipart
14 except ImportError: # pragma: nocover
15 parse_options_header = None # type: ignore
16 multipart = None # type: ignore
17
18
19 class FormMessage(Enum):
20 FIELD_START = 1
21 FIELD_NAME = 2
22 FIELD_DATA = 3
23 FIELD_END = 4
24 END = 5
25
26
27 class MultiPartMessage(Enum):
28 PART_BEGIN = 1
29 PART_DATA = 2
30 PART_END = 3
31 HEADER_FIELD = 4
32 HEADER_VALUE = 5
33 HEADER_END = 6
34 HEADERS_FINISHED = 7
35 END = 8
36
37
38 class UploadFile:
39 def __init__(self, filename: str) -> None:
40 self.filename = filename
41 self._file = io.BytesIO() # type: typing.IO[typing.Any]
42 self._loop = asyncio.get_event_loop()
43
44 def create_tempfile(self) -> None:
45 self._file = tempfile.SpooledTemporaryFile()
46
47 async def setup(self) -> None:
48 await run_in_threadpool(self.create_tempfile)
49
50 async def write(self, data: bytes) -> None:
51 await run_in_threadpool(self._file.write, data)
52
53 async def read(self, size: int = None) -> bytes:
54 return await run_in_threadpool(self._file.read, size)
55
56 async def seek(self, offset: int) -> None:
57 await run_in_threadpool(self._file.seek, offset)
58
59 async def close(self) -> None:
60 await run_in_threadpool(self._file.close)
61
62
63 class FormParser:
64 def __init__(
65 self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]
66 ) -> None:
67 assert (
68 multipart is not None
69 ), "The `python-multipart` library must be installed to use form parsing."
70 self.headers = headers
71 self.stream = stream
72 self.messages = [] # type: typing.List[typing.Tuple[FormMessage, bytes]]
73
74 def on_field_start(self) -> None:
75 message = (FormMessage.FIELD_START, b"")
76 self.messages.append(message)
77
78 def on_field_name(self, data: bytes, start: int, end: int) -> None:
79 message = (FormMessage.FIELD_NAME, data[start:end])
80 self.messages.append(message)
81
82 def on_field_data(self, data: bytes, start: int, end: int) -> None:
83 message = (FormMessage.FIELD_DATA, data[start:end])
84 self.messages.append(message)
85
86 def on_field_end(self) -> None:
87 message = (FormMessage.FIELD_END, b"")
88 self.messages.append(message)
89
90 def on_end(self) -> None:
91 message = (FormMessage.END, b"")
92 self.messages.append(message)
93
94 async def parse(self) -> typing.Dict[str, typing.Union[str, UploadFile]]:
95 # Callbacks dictionary.
96 callbacks = {
97 "on_field_start": self.on_field_start,
98 "on_field_name": self.on_field_name,
99 "on_field_data": self.on_field_data,
100 "on_field_end": self.on_field_end,
101 "on_end": self.on_end,
102 }
103
104 # Create the parser.
105 parser = multipart.QuerystringParser(callbacks)
106 field_name = b""
107 field_value = b""
108
109 result = {} # type: typing.Dict[str, typing.Union[str, UploadFile]]
110
111 # Feed the parser with data from the request.
112 async for chunk in self.stream:
113 if chunk:
114 parser.write(chunk)
115 else:
116 parser.finalize()
117 messages = list(self.messages)
118 self.messages.clear()
119 for message_type, message_bytes in messages:
120 if message_type == FormMessage.FIELD_START:
121 field_name = b""
122 field_value = b""
123 elif message_type == FormMessage.FIELD_NAME:
124 field_name += message_bytes
125 elif message_type == FormMessage.FIELD_DATA:
126 field_value += message_bytes
127 elif message_type == FormMessage.FIELD_END:
128 result[field_name.decode("latin-1")] = unquote_plus(
129 field_value.decode("latin-1")
130 )
131 elif message_type == FormMessage.END:
132 pass
133
134 return result
135
136
137 class MultiPartParser:
138 def __init__(
139 self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]
140 ) -> None:
141 assert (
142 multipart is not None
143 ), "The `python-multipart` library must be installed to use form parsing."
144 self.headers = headers
145 self.stream = stream
146 self.messages = [] # type: typing.List[typing.Tuple[MultiPartMessage, bytes]]
147
148 def on_part_begin(self) -> None:
149 message = (MultiPartMessage.PART_BEGIN, b"")
150 self.messages.append(message)
151
152 def on_part_data(self, data: bytes, start: int, end: int) -> None:
153 message = (MultiPartMessage.PART_DATA, data[start:end])
154 self.messages.append(message)
155
156 def on_part_end(self) -> None:
157 message = (MultiPartMessage.PART_END, b"")
158 self.messages.append(message)
159
160 def on_header_field(self, data: bytes, start: int, end: int) -> None:
161 message = (MultiPartMessage.HEADER_FIELD, data[start:end])
162 self.messages.append(message)
163
164 def on_header_value(self, data: bytes, start: int, end: int) -> None:
165 message = (MultiPartMessage.HEADER_VALUE, data[start:end])
166 self.messages.append(message)
167
168 def on_header_end(self) -> None:
169 message = (MultiPartMessage.HEADER_END, b"")
170 self.messages.append(message)
171
172 def on_headers_finished(self) -> None:
173 message = (MultiPartMessage.HEADERS_FINISHED, b"")
174 self.messages.append(message)
175
176 def on_end(self) -> None:
177 message = (MultiPartMessage.END, b"")
178 self.messages.append(message)
179
180 async def parse(self) -> typing.Dict[str, typing.Union[str, UploadFile]]:
181 # Parse the Content-Type header to get the multipart boundary.
182 content_type, params = parse_options_header(self.headers["Content-Type"])
183 boundary = params.get(b"boundary")
184
185 # Callbacks dictionary.
186 callbacks = {
187 "on_part_begin": self.on_part_begin,
188 "on_part_data": self.on_part_data,
189 "on_part_end": self.on_part_end,
190 "on_header_field": self.on_header_field,
191 "on_header_value": self.on_header_value,
192 "on_header_end": self.on_header_end,
193 "on_headers_finished": self.on_headers_finished,
194 "on_end": self.on_end,
195 }
196
197 # Create the parser.
198 parser = multipart.MultipartParser(boundary, callbacks)
199 header_field = b""
200 header_value = b""
201 raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]]
202 field_name = ""
203 data = b""
204 file = None # type: typing.Optional[UploadFile]
205
206 result = {} # type: typing.Dict[str, typing.Union[str, UploadFile]]
207
208 # Feed the parser with data from the request.
209 async for chunk in self.stream:
210 parser.write(chunk)
211 messages = list(self.messages)
212 self.messages.clear()
213 for message_type, message_bytes in messages:
214 if message_type == MultiPartMessage.PART_BEGIN:
215 raw_headers = []
216 data = b""
217 elif message_type == MultiPartMessage.HEADER_FIELD:
218 header_field += message_bytes
219 elif message_type == MultiPartMessage.HEADER_VALUE:
220 header_value += message_bytes
221 elif message_type == MultiPartMessage.HEADER_END:
222 raw_headers.append((header_field.lower(), header_value))
223 header_field = b""
224 header_value = b""
225 elif message_type == MultiPartMessage.HEADERS_FINISHED:
226 headers = Headers(raw=raw_headers)
227 content_disposition = headers.get("Content-Disposition")
228 disposition, options = parse_options_header(content_disposition)
229 field_name = options[b"name"].decode("latin-1")
230 if b"filename" in options:
231 filename = options[b"filename"].decode("latin-1")
232 file = UploadFile(filename=filename)
233 await file.setup()
234 else:
235 file = None
236 elif message_type == MultiPartMessage.PART_DATA:
237 if file is None:
238 data += message_bytes
239 else:
240 await file.write(message_bytes)
241 elif message_type == MultiPartMessage.PART_END:
242 if file is None:
243 result[field_name] = data.decode("latin-1")
244 else:
245 await file.seek(0)
246 result[field_name] = file
247 elif message_type == MultiPartMessage.END:
248 pass
249
250 parser.finalize()
251 return result
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/formparsers.py b/starlette/formparsers.py
--- a/starlette/formparsers.py
+++ b/starlette/formparsers.py
@@ -125,9 +125,9 @@
elif message_type == FormMessage.FIELD_DATA:
field_value += message_bytes
elif message_type == FormMessage.FIELD_END:
- result[field_name.decode("latin-1")] = unquote_plus(
- field_value.decode("latin-1")
- )
+ name = unquote_plus(field_name.decode("latin-1"))
+ value = unquote_plus(field_value.decode("latin-1"))
+ result[name] = value
elif message_type == FormMessage.END:
pass
| {"golden_diff": "diff --git a/starlette/formparsers.py b/starlette/formparsers.py\n--- a/starlette/formparsers.py\n+++ b/starlette/formparsers.py\n@@ -125,9 +125,9 @@\n elif message_type == FormMessage.FIELD_DATA:\n field_value += message_bytes\n elif message_type == FormMessage.FIELD_END:\n- result[field_name.decode(\"latin-1\")] = unquote_plus(\n- field_value.decode(\"latin-1\")\n- )\n+ name = unquote_plus(field_name.decode(\"latin-1\"))\n+ value = unquote_plus(field_value.decode(\"latin-1\"))\n+ result[name] = value\n elif message_type == FormMessage.END:\n pass\n", "issue": "Form field names do not escape perfect encoding\nI switched one microservice from Flask to Starlette and got unexpected form parsing problem.\r\nThis code will return %20 in places where spaces were supposed to be:\r\n```\r\nfrom starlette.applications import Starlette\r\nfrom starlette.responses import PlainTextResponse\r\nfrom starlette.requests import Request as StarletteRequest\r\n\r\nSERVER = Starlette()\r\n\r\[email protected](\"/send\", methods=[\"POST\"])\r\nasync def send_endpoint(request: StarletteRequest) -> PlainTextResponse:\r\n form = await request.form()\r\n if \"number\" in form and \"name\" in form:\r\n number = form[\"number\"]\r\n name = form[\"name\"]\r\n return PlainTextResponse(\"Number: \" + number + \" ; Name: \" + name)\r\n```\r\n\r\nMaybe there are other issues with form parsing (x-www-form-urlencoded).\n", "before_files": [{"content": "import asyncio\nimport io\nimport tempfile\nimport typing\nfrom enum import Enum\nfrom urllib.parse import unquote_plus\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.datastructures import Headers\n\ntry:\n from multipart.multipart import parse_options_header\n import multipart\nexcept ImportError: # pragma: nocover\n parse_options_header = None # type: ignore\n multipart = None # type: ignore\n\n\nclass FormMessage(Enum):\n FIELD_START = 1\n FIELD_NAME = 2\n FIELD_DATA = 3\n FIELD_END = 4\n END = 5\n\n\nclass MultiPartMessage(Enum):\n PART_BEGIN = 1\n PART_DATA = 2\n PART_END = 3\n HEADER_FIELD = 4\n HEADER_VALUE = 5\n HEADER_END = 6\n HEADERS_FINISHED = 7\n END = 8\n\n\nclass UploadFile:\n def __init__(self, filename: str) -> None:\n self.filename = filename\n self._file = io.BytesIO() # type: typing.IO[typing.Any]\n self._loop = asyncio.get_event_loop()\n\n def create_tempfile(self) -> None:\n self._file = tempfile.SpooledTemporaryFile()\n\n async def setup(self) -> None:\n await run_in_threadpool(self.create_tempfile)\n\n async def write(self, data: bytes) -> None:\n await run_in_threadpool(self._file.write, data)\n\n async def read(self, size: int = None) -> bytes:\n return await run_in_threadpool(self._file.read, size)\n\n async def seek(self, offset: int) -> None:\n await run_in_threadpool(self._file.seek, offset)\n\n async def close(self) -> None:\n await run_in_threadpool(self._file.close)\n\n\nclass FormParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages = [] # type: typing.List[typing.Tuple[FormMessage, bytes]]\n\n def on_field_start(self) -> None:\n message = (FormMessage.FIELD_START, b\"\")\n self.messages.append(message)\n\n def on_field_name(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_NAME, data[start:end])\n self.messages.append(message)\n\n def 
on_field_data(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_DATA, data[start:end])\n self.messages.append(message)\n\n def on_field_end(self) -> None:\n message = (FormMessage.FIELD_END, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (FormMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> typing.Dict[str, typing.Union[str, UploadFile]]:\n # Callbacks dictionary.\n callbacks = {\n \"on_field_start\": self.on_field_start,\n \"on_field_name\": self.on_field_name,\n \"on_field_data\": self.on_field_data,\n \"on_field_end\": self.on_field_end,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.QuerystringParser(callbacks)\n field_name = b\"\"\n field_value = b\"\"\n\n result = {} # type: typing.Dict[str, typing.Union[str, UploadFile]]\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n if chunk:\n parser.write(chunk)\n else:\n parser.finalize()\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == FormMessage.FIELD_START:\n field_name = b\"\"\n field_value = b\"\"\n elif message_type == FormMessage.FIELD_NAME:\n field_name += message_bytes\n elif message_type == FormMessage.FIELD_DATA:\n field_value += message_bytes\n elif message_type == FormMessage.FIELD_END:\n result[field_name.decode(\"latin-1\")] = unquote_plus(\n field_value.decode(\"latin-1\")\n )\n elif message_type == FormMessage.END:\n pass\n\n return result\n\n\nclass MultiPartParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages = [] # type: typing.List[typing.Tuple[MultiPartMessage, bytes]]\n\n def on_part_begin(self) -> None:\n message = (MultiPartMessage.PART_BEGIN, b\"\")\n self.messages.append(message)\n\n def on_part_data(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.PART_DATA, data[start:end])\n self.messages.append(message)\n\n def on_part_end(self) -> None:\n message = (MultiPartMessage.PART_END, b\"\")\n self.messages.append(message)\n\n def on_header_field(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_FIELD, data[start:end])\n self.messages.append(message)\n\n def on_header_value(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_VALUE, data[start:end])\n self.messages.append(message)\n\n def on_header_end(self) -> None:\n message = (MultiPartMessage.HEADER_END, b\"\")\n self.messages.append(message)\n\n def on_headers_finished(self) -> None:\n message = (MultiPartMessage.HEADERS_FINISHED, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (MultiPartMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> typing.Dict[str, typing.Union[str, UploadFile]]:\n # Parse the Content-Type header to get the multipart boundary.\n content_type, params = parse_options_header(self.headers[\"Content-Type\"])\n boundary = params.get(b\"boundary\")\n\n # Callbacks dictionary.\n callbacks = {\n \"on_part_begin\": self.on_part_begin,\n \"on_part_data\": self.on_part_data,\n \"on_part_end\": self.on_part_end,\n \"on_header_field\": self.on_header_field,\n \"on_header_value\": self.on_header_value,\n \"on_header_end\": 
self.on_header_end,\n \"on_headers_finished\": self.on_headers_finished,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.MultipartParser(boundary, callbacks)\n header_field = b\"\"\n header_value = b\"\"\n raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]]\n field_name = \"\"\n data = b\"\"\n file = None # type: typing.Optional[UploadFile]\n\n result = {} # type: typing.Dict[str, typing.Union[str, UploadFile]]\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n parser.write(chunk)\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == MultiPartMessage.PART_BEGIN:\n raw_headers = []\n data = b\"\"\n elif message_type == MultiPartMessage.HEADER_FIELD:\n header_field += message_bytes\n elif message_type == MultiPartMessage.HEADER_VALUE:\n header_value += message_bytes\n elif message_type == MultiPartMessage.HEADER_END:\n raw_headers.append((header_field.lower(), header_value))\n header_field = b\"\"\n header_value = b\"\"\n elif message_type == MultiPartMessage.HEADERS_FINISHED:\n headers = Headers(raw=raw_headers)\n content_disposition = headers.get(\"Content-Disposition\")\n disposition, options = parse_options_header(content_disposition)\n field_name = options[b\"name\"].decode(\"latin-1\")\n if b\"filename\" in options:\n filename = options[b\"filename\"].decode(\"latin-1\")\n file = UploadFile(filename=filename)\n await file.setup()\n else:\n file = None\n elif message_type == MultiPartMessage.PART_DATA:\n if file is None:\n data += message_bytes\n else:\n await file.write(message_bytes)\n elif message_type == MultiPartMessage.PART_END:\n if file is None:\n result[field_name] = data.decode(\"latin-1\")\n else:\n await file.seek(0)\n result[field_name] = file\n elif message_type == MultiPartMessage.END:\n pass\n\n parser.finalize()\n return result\n", "path": "starlette/formparsers.py"}], "after_files": [{"content": "import asyncio\nimport io\nimport tempfile\nimport typing\nfrom enum import Enum\nfrom urllib.parse import unquote_plus\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.datastructures import Headers\n\ntry:\n from multipart.multipart import parse_options_header\n import multipart\nexcept ImportError: # pragma: nocover\n parse_options_header = None # type: ignore\n multipart = None # type: ignore\n\n\nclass FormMessage(Enum):\n FIELD_START = 1\n FIELD_NAME = 2\n FIELD_DATA = 3\n FIELD_END = 4\n END = 5\n\n\nclass MultiPartMessage(Enum):\n PART_BEGIN = 1\n PART_DATA = 2\n PART_END = 3\n HEADER_FIELD = 4\n HEADER_VALUE = 5\n HEADER_END = 6\n HEADERS_FINISHED = 7\n END = 8\n\n\nclass UploadFile:\n def __init__(self, filename: str) -> None:\n self.filename = filename\n self._file = io.BytesIO() # type: typing.IO[typing.Any]\n self._loop = asyncio.get_event_loop()\n\n def create_tempfile(self) -> None:\n self._file = tempfile.SpooledTemporaryFile()\n\n async def setup(self) -> None:\n await run_in_threadpool(self.create_tempfile)\n\n async def write(self, data: bytes) -> None:\n await run_in_threadpool(self._file.write, data)\n\n async def read(self, size: int = None) -> bytes:\n return await run_in_threadpool(self._file.read, size)\n\n async def seek(self, offset: int) -> None:\n await run_in_threadpool(self._file.seek, offset)\n\n async def close(self) -> None:\n await run_in_threadpool(self._file.close)\n\n\nclass FormParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) 
-> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages = [] # type: typing.List[typing.Tuple[FormMessage, bytes]]\n\n def on_field_start(self) -> None:\n message = (FormMessage.FIELD_START, b\"\")\n self.messages.append(message)\n\n def on_field_name(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_NAME, data[start:end])\n self.messages.append(message)\n\n def on_field_data(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_DATA, data[start:end])\n self.messages.append(message)\n\n def on_field_end(self) -> None:\n message = (FormMessage.FIELD_END, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (FormMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> typing.Dict[str, typing.Union[str, UploadFile]]:\n # Callbacks dictionary.\n callbacks = {\n \"on_field_start\": self.on_field_start,\n \"on_field_name\": self.on_field_name,\n \"on_field_data\": self.on_field_data,\n \"on_field_end\": self.on_field_end,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.QuerystringParser(callbacks)\n field_name = b\"\"\n field_value = b\"\"\n\n result = {} # type: typing.Dict[str, typing.Union[str, UploadFile]]\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n if chunk:\n parser.write(chunk)\n else:\n parser.finalize()\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == FormMessage.FIELD_START:\n field_name = b\"\"\n field_value = b\"\"\n elif message_type == FormMessage.FIELD_NAME:\n field_name += message_bytes\n elif message_type == FormMessage.FIELD_DATA:\n field_value += message_bytes\n elif message_type == FormMessage.FIELD_END:\n name = unquote_plus(field_name.decode(\"latin-1\"))\n value = unquote_plus(field_value.decode(\"latin-1\"))\n result[name] = value\n elif message_type == FormMessage.END:\n pass\n\n return result\n\n\nclass MultiPartParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages = [] # type: typing.List[typing.Tuple[MultiPartMessage, bytes]]\n\n def on_part_begin(self) -> None:\n message = (MultiPartMessage.PART_BEGIN, b\"\")\n self.messages.append(message)\n\n def on_part_data(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.PART_DATA, data[start:end])\n self.messages.append(message)\n\n def on_part_end(self) -> None:\n message = (MultiPartMessage.PART_END, b\"\")\n self.messages.append(message)\n\n def on_header_field(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_FIELD, data[start:end])\n self.messages.append(message)\n\n def on_header_value(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_VALUE, data[start:end])\n self.messages.append(message)\n\n def on_header_end(self) -> None:\n message = (MultiPartMessage.HEADER_END, b\"\")\n self.messages.append(message)\n\n def on_headers_finished(self) -> None:\n message = (MultiPartMessage.HEADERS_FINISHED, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (MultiPartMessage.END, b\"\")\n 
self.messages.append(message)\n\n async def parse(self) -> typing.Dict[str, typing.Union[str, UploadFile]]:\n # Parse the Content-Type header to get the multipart boundary.\n content_type, params = parse_options_header(self.headers[\"Content-Type\"])\n boundary = params.get(b\"boundary\")\n\n # Callbacks dictionary.\n callbacks = {\n \"on_part_begin\": self.on_part_begin,\n \"on_part_data\": self.on_part_data,\n \"on_part_end\": self.on_part_end,\n \"on_header_field\": self.on_header_field,\n \"on_header_value\": self.on_header_value,\n \"on_header_end\": self.on_header_end,\n \"on_headers_finished\": self.on_headers_finished,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.MultipartParser(boundary, callbacks)\n header_field = b\"\"\n header_value = b\"\"\n raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]]\n field_name = \"\"\n data = b\"\"\n file = None # type: typing.Optional[UploadFile]\n\n result = {} # type: typing.Dict[str, typing.Union[str, UploadFile]]\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n parser.write(chunk)\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == MultiPartMessage.PART_BEGIN:\n raw_headers = []\n data = b\"\"\n elif message_type == MultiPartMessage.HEADER_FIELD:\n header_field += message_bytes\n elif message_type == MultiPartMessage.HEADER_VALUE:\n header_value += message_bytes\n elif message_type == MultiPartMessage.HEADER_END:\n raw_headers.append((header_field.lower(), header_value))\n header_field = b\"\"\n header_value = b\"\"\n elif message_type == MultiPartMessage.HEADERS_FINISHED:\n headers = Headers(raw=raw_headers)\n content_disposition = headers.get(\"Content-Disposition\")\n disposition, options = parse_options_header(content_disposition)\n field_name = options[b\"name\"].decode(\"latin-1\")\n if b\"filename\" in options:\n filename = options[b\"filename\"].decode(\"latin-1\")\n file = UploadFile(filename=filename)\n await file.setup()\n else:\n file = None\n elif message_type == MultiPartMessage.PART_DATA:\n if file is None:\n data += message_bytes\n else:\n await file.write(message_bytes)\n elif message_type == MultiPartMessage.PART_END:\n if file is None:\n result[field_name] = data.decode(\"latin-1\")\n else:\n await file.seek(0)\n result[field_name] = file\n elif message_type == MultiPartMessage.END:\n pass\n\n parser.finalize()\n return result\n", "path": "starlette/formparsers.py"}]} | 3,035 | 152 |
gh_patches_debug_4406 | rasdani/github-patches | git_diff | pytorch__ignite-416 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ModelCheckpoint parameter `save_as_state_dict` should default to True
Given that the recommended way to save model checkpoints is to only serialize the `state_dict` rather than the whole object (see https://pytorch.org/docs/stable/notes/serialization.html), shouldn't `save_as_state_dict` in `ModelCheckpoint` default to `True`?
Maybe I'm nitpicking but I think this would be a better default.
Here's the line: https://github.com/pytorch/ignite/blob/862ab0073e461a32bf5b53fa015f88c143ae9079/ignite/handlers/checkpoint.py#L84
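For context, the two saving styles differ roughly as follows (illustrative sketch, not ignite code):
```python
import torch
from torch import nn

model = nn.Linear(3, 3)

# Recommended: persist only the parameters/buffers.
torch.save(model.state_dict(), "model.pth")
model.load_state_dict(torch.load("model.pth"))

# What `save_as_state_dict=False` currently does: pickle the whole module object,
# which ties the checkpoint to the exact class and module path at load time.
torch.save(model, "model_full.pth")
```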
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/handlers/checkpoint.py`
Content:
```
1 import os
2 import tempfile
3
4 import torch
5
6
7 class ModelCheckpoint(object):
8 """ ModelCheckpoint handler can be used to periodically save objects to disk.
9
10 This handler expects two arguments:
11
12 - an :class:`~ignite.engine.Engine` object
13 - a `dict` mapping names (`str`) to objects that should be saved to disk.
14
15 See Notes and Examples for further details.
16
17 Args:
18 dirname (str):
19 Directory path where objects will be saved.
20 filename_prefix (str):
21 Prefix for the filenames to which objects will be saved. See Notes
22 for more details.
23 save_interval (int, optional):
24 if not None, objects will be saved to disk every `save_interval` calls to the handler.
25 Exactly one of (`save_interval`, `score_function`) arguments must be provided.
26 score_function (callable, optional):
27 if not None, it should be a function taking a single argument,
28 an :class:`~ignite.engine.Engine` object,
29 and return a score (`float`). Objects with highest scores will be retained.
30 Exactly one of (`save_interval`, `score_function`) arguments must be provided.
31 score_name (str, optional):
32 if `score_function` not None, it is possible to store its absolute value using `score_name`. See Notes for
33 more details.
34 n_saved (int, optional):
35 Number of objects that should be kept on disk. Older files will be removed.
36 atomic (bool, optional):
37 If True, objects are serialized to a temporary file,
38 and then moved to final destination, so that files are
39 guaranteed to not be damaged (for example if exception occures during saving).
40 require_empty (bool, optional):
41 If True, will raise exception if there are any files starting with `filename_prefix`
42 in the directory 'dirname'.
43 create_dir (bool, optional):
44 If True, will create directory 'dirname' if it doesnt exist.
45 save_as_state_dict (bool, optional):
46 If True, will save only the `state_dict` of the objects specified, otherwise the whole object will be saved.
47
48 Notes:
49 This handler expects two arguments: an :class:`~ignite.engine.Engine` object and a `dict`
50 mapping names to objects that should be saved.
51
52 These names are used to specify filenames for saved objects.
53 Each filename has the following structure:
54 `{filename_prefix}_{name}_{step_number}.pth`.
55 Here, `filename_prefix` is the argument passed to the constructor,
56 `name` is the key in the aforementioned `dict`, and `step_number`
57 is incremented by `1` with every call to the handler.
58
59 If `score_function` is provided, user can store its absolute value using `score_name` in the filename.
60 Each filename can have the following structure:
61 `{filename_prefix}_{name}_{step_number}_{score_name}={abs(score_function_result)}.pth`.
62 For example, `score_name="val_loss"` and `score_function` that returns `-loss` (as objects with highest scores
63 will be retained), then saved models filenames will be `model_resnet_10_val_loss=0.1234.pth`.
64
65 Examples:
66 >>> import os
67 >>> from ignite.engine import Engine, Events
68 >>> from ignite.handlers import ModelCheckpoint
69 >>> from torch import nn
70 >>> trainer = Engine(lambda batch: None)
71 >>> handler = ModelCheckpoint('/tmp/models', 'myprefix', save_interval=2, n_saved=2, create_dir=True)
72 >>> model = nn.Linear(3, 3)
73 >>> trainer.add_event_handler(Events.EPOCH_COMPLETED, handler, {'mymodel': model})
74 >>> trainer.run([0], max_epochs=6)
75 >>> os.listdir('/tmp/models')
76 ['myprefix_mymodel_4.pth', 'myprefix_mymodel_6.pth']
77 """
78
79 def __init__(self, dirname, filename_prefix,
80 save_interval=None, score_function=None, score_name=None,
81 n_saved=1,
82 atomic=True, require_empty=True,
83 create_dir=True,
84 save_as_state_dict=False):
85
86 self._dirname = os.path.expanduser(dirname)
87 self._fname_prefix = filename_prefix
88 self._n_saved = n_saved
89 self._save_interval = save_interval
90 self._score_function = score_function
91 self._score_name = score_name
92 self._atomic = atomic
93 self._saved = [] # list of tuples (priority, saved_objects)
94 self._iteration = 0
95 self._save_as_state_dict = save_as_state_dict
96
97 if not (save_interval is None) ^ (score_function is None):
98 raise ValueError("Exactly one of `save_interval`, or `score_function` "
99 "arguments must be provided.")
100
101 if score_function is None and score_name is not None:
102 raise ValueError("If `score_name` is provided, then `score_function` "
103 "should be also provided.")
104
105 if create_dir:
106 if not os.path.exists(dirname):
107 os.makedirs(dirname)
108
109 # Ensure that dirname exists
110 if not os.path.exists(dirname):
111 raise ValueError("Directory path '{}' is not found.".format(dirname))
112
113 if require_empty:
114 matched = [fname
115 for fname in os.listdir(dirname)
116 if fname.startswith(self._fname_prefix)]
117
118 if len(matched) > 0:
119 raise ValueError("Files prefixed with {} are already present "
120 "in the directory {}. If you want to use this "
121 "directory anyway, pass `require_empty=False`."
122 "".format(filename_prefix, dirname))
123
124 def _save(self, obj, path):
125 if not self._atomic:
126 self._internal_save(obj, path)
127 else:
128 tmp = tempfile.NamedTemporaryFile(delete=False, dir=self._dirname)
129 try:
130 self._internal_save(obj, tmp.file)
131 except BaseException:
132 tmp.close()
133 os.remove(tmp.name)
134 raise
135 else:
136 tmp.close()
137 os.rename(tmp.name, path)
138
139 def _internal_save(self, obj, path):
140 if not self._save_as_state_dict:
141 torch.save(obj, path)
142 else:
143 if not hasattr(obj, "state_dict") or not callable(obj.state_dict):
144 raise ValueError("Object should have `state_dict` method.")
145 torch.save(obj.state_dict(), path)
146
147 def __call__(self, engine, to_save):
148 if len(to_save) == 0:
149 raise RuntimeError("No objects to checkpoint found.")
150
151 self._iteration += 1
152
153 if self._score_function is not None:
154 priority = self._score_function(engine)
155
156 else:
157 priority = self._iteration
158 if (self._iteration % self._save_interval) != 0:
159 return
160
161 if (len(self._saved) < self._n_saved) or (self._saved[0][0] < priority):
162 saved_objs = []
163
164 suffix = ""
165 if self._score_name is not None:
166 suffix = "_{}={:.7}".format(self._score_name, abs(priority))
167
168 for name, obj in to_save.items():
169 fname = '{}_{}_{}{}.pth'.format(self._fname_prefix, name, self._iteration, suffix)
170 path = os.path.join(self._dirname, fname)
171
172 self._save(obj=obj, path=path)
173 saved_objs.append(path)
174
175 self._saved.append((priority, saved_objs))
176 self._saved.sort(key=lambda item: item[0])
177
178 if len(self._saved) > self._n_saved:
179 _, paths = self._saved.pop(0)
180 for p in paths:
181 os.remove(p)
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/handlers/checkpoint.py b/ignite/handlers/checkpoint.py
--- a/ignite/handlers/checkpoint.py
+++ b/ignite/handlers/checkpoint.py
@@ -81,7 +81,7 @@
n_saved=1,
atomic=True, require_empty=True,
create_dir=True,
- save_as_state_dict=False):
+ save_as_state_dict=True):
self._dirname = os.path.expanduser(dirname)
self._fname_prefix = filename_prefix
| {"golden_diff": "diff --git a/ignite/handlers/checkpoint.py b/ignite/handlers/checkpoint.py\n--- a/ignite/handlers/checkpoint.py\n+++ b/ignite/handlers/checkpoint.py\n@@ -81,7 +81,7 @@\n n_saved=1,\n atomic=True, require_empty=True,\n create_dir=True,\n- save_as_state_dict=False):\n+ save_as_state_dict=True):\n \n self._dirname = os.path.expanduser(dirname)\n self._fname_prefix = filename_prefix\n", "issue": "ModelCheckpoint parameter `save_as_state_dict` should default to True\nGiven that the recommended way to save model checkpoints is to only serialize the `state_dict` rather than the whole object (see https://pytorch.org/docs/stable/notes/serialization.html), shouldn't `save_as_state_dict` in `ModelCheckpoint` default to `True`?\r\n\r\nMaybe I'm nitpicking but I think this would be a better default.\r\n\r\nHere's the line: https://github.com/pytorch/ignite/blob/862ab0073e461a32bf5b53fa015f88c143ae9079/ignite/handlers/checkpoint.py#L84\n", "before_files": [{"content": "import os\nimport tempfile\n\nimport torch\n\n\nclass ModelCheckpoint(object):\n \"\"\" ModelCheckpoint handler can be used to periodically save objects to disk.\n\n This handler expects two arguments:\n\n - an :class:`~ignite.engine.Engine` object\n - a `dict` mapping names (`str`) to objects that should be saved to disk.\n\n See Notes and Examples for further details.\n\n Args:\n dirname (str):\n Directory path where objects will be saved.\n filename_prefix (str):\n Prefix for the filenames to which objects will be saved. See Notes\n for more details.\n save_interval (int, optional):\n if not None, objects will be saved to disk every `save_interval` calls to the handler.\n Exactly one of (`save_interval`, `score_function`) arguments must be provided.\n score_function (callable, optional):\n if not None, it should be a function taking a single argument,\n an :class:`~ignite.engine.Engine` object,\n and return a score (`float`). Objects with highest scores will be retained.\n Exactly one of (`save_interval`, `score_function`) arguments must be provided.\n score_name (str, optional):\n if `score_function` not None, it is possible to store its absolute value using `score_name`. See Notes for\n more details.\n n_saved (int, optional):\n Number of objects that should be kept on disk. 
Older files will be removed.\n atomic (bool, optional):\n If True, objects are serialized to a temporary file,\n and then moved to final destination, so that files are\n guaranteed to not be damaged (for example if exception occures during saving).\n require_empty (bool, optional):\n If True, will raise exception if there are any files starting with `filename_prefix`\n in the directory 'dirname'.\n create_dir (bool, optional):\n If True, will create directory 'dirname' if it doesnt exist.\n save_as_state_dict (bool, optional):\n If True, will save only the `state_dict` of the objects specified, otherwise the whole object will be saved.\n\n Notes:\n This handler expects two arguments: an :class:`~ignite.engine.Engine` object and a `dict`\n mapping names to objects that should be saved.\n\n These names are used to specify filenames for saved objects.\n Each filename has the following structure:\n `{filename_prefix}_{name}_{step_number}.pth`.\n Here, `filename_prefix` is the argument passed to the constructor,\n `name` is the key in the aforementioned `dict`, and `step_number`\n is incremented by `1` with every call to the handler.\n\n If `score_function` is provided, user can store its absolute value using `score_name` in the filename.\n Each filename can have the following structure:\n `{filename_prefix}_{name}_{step_number}_{score_name}={abs(score_function_result)}.pth`.\n For example, `score_name=\"val_loss\"` and `score_function` that returns `-loss` (as objects with highest scores\n will be retained), then saved models filenames will be `model_resnet_10_val_loss=0.1234.pth`.\n\n Examples:\n >>> import os\n >>> from ignite.engine import Engine, Events\n >>> from ignite.handlers import ModelCheckpoint\n >>> from torch import nn\n >>> trainer = Engine(lambda batch: None)\n >>> handler = ModelCheckpoint('/tmp/models', 'myprefix', save_interval=2, n_saved=2, create_dir=True)\n >>> model = nn.Linear(3, 3)\n >>> trainer.add_event_handler(Events.EPOCH_COMPLETED, handler, {'mymodel': model})\n >>> trainer.run([0], max_epochs=6)\n >>> os.listdir('/tmp/models')\n ['myprefix_mymodel_4.pth', 'myprefix_mymodel_6.pth']\n \"\"\"\n\n def __init__(self, dirname, filename_prefix,\n save_interval=None, score_function=None, score_name=None,\n n_saved=1,\n atomic=True, require_empty=True,\n create_dir=True,\n save_as_state_dict=False):\n\n self._dirname = os.path.expanduser(dirname)\n self._fname_prefix = filename_prefix\n self._n_saved = n_saved\n self._save_interval = save_interval\n self._score_function = score_function\n self._score_name = score_name\n self._atomic = atomic\n self._saved = [] # list of tuples (priority, saved_objects)\n self._iteration = 0\n self._save_as_state_dict = save_as_state_dict\n\n if not (save_interval is None) ^ (score_function is None):\n raise ValueError(\"Exactly one of `save_interval`, or `score_function` \"\n \"arguments must be provided.\")\n\n if score_function is None and score_name is not None:\n raise ValueError(\"If `score_name` is provided, then `score_function` \"\n \"should be also provided.\")\n\n if create_dir:\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n # Ensure that dirname exists\n if not os.path.exists(dirname):\n raise ValueError(\"Directory path '{}' is not found.\".format(dirname))\n\n if require_empty:\n matched = [fname\n for fname in os.listdir(dirname)\n if fname.startswith(self._fname_prefix)]\n\n if len(matched) > 0:\n raise ValueError(\"Files prefixed with {} are already present \"\n \"in the directory {}. 
If you want to use this \"\n \"directory anyway, pass `require_empty=False`.\"\n \"\".format(filename_prefix, dirname))\n\n def _save(self, obj, path):\n if not self._atomic:\n self._internal_save(obj, path)\n else:\n tmp = tempfile.NamedTemporaryFile(delete=False, dir=self._dirname)\n try:\n self._internal_save(obj, tmp.file)\n except BaseException:\n tmp.close()\n os.remove(tmp.name)\n raise\n else:\n tmp.close()\n os.rename(tmp.name, path)\n\n def _internal_save(self, obj, path):\n if not self._save_as_state_dict:\n torch.save(obj, path)\n else:\n if not hasattr(obj, \"state_dict\") or not callable(obj.state_dict):\n raise ValueError(\"Object should have `state_dict` method.\")\n torch.save(obj.state_dict(), path)\n\n def __call__(self, engine, to_save):\n if len(to_save) == 0:\n raise RuntimeError(\"No objects to checkpoint found.\")\n\n self._iteration += 1\n\n if self._score_function is not None:\n priority = self._score_function(engine)\n\n else:\n priority = self._iteration\n if (self._iteration % self._save_interval) != 0:\n return\n\n if (len(self._saved) < self._n_saved) or (self._saved[0][0] < priority):\n saved_objs = []\n\n suffix = \"\"\n if self._score_name is not None:\n suffix = \"_{}={:.7}\".format(self._score_name, abs(priority))\n\n for name, obj in to_save.items():\n fname = '{}_{}_{}{}.pth'.format(self._fname_prefix, name, self._iteration, suffix)\n path = os.path.join(self._dirname, fname)\n\n self._save(obj=obj, path=path)\n saved_objs.append(path)\n\n self._saved.append((priority, saved_objs))\n self._saved.sort(key=lambda item: item[0])\n\n if len(self._saved) > self._n_saved:\n _, paths = self._saved.pop(0)\n for p in paths:\n os.remove(p)\n", "path": "ignite/handlers/checkpoint.py"}], "after_files": [{"content": "import os\nimport tempfile\n\nimport torch\n\n\nclass ModelCheckpoint(object):\n \"\"\" ModelCheckpoint handler can be used to periodically save objects to disk.\n\n This handler expects two arguments:\n\n - an :class:`~ignite.engine.Engine` object\n - a `dict` mapping names (`str`) to objects that should be saved to disk.\n\n See Notes and Examples for further details.\n\n Args:\n dirname (str):\n Directory path where objects will be saved.\n filename_prefix (str):\n Prefix for the filenames to which objects will be saved. See Notes\n for more details.\n save_interval (int, optional):\n if not None, objects will be saved to disk every `save_interval` calls to the handler.\n Exactly one of (`save_interval`, `score_function`) arguments must be provided.\n score_function (callable, optional):\n if not None, it should be a function taking a single argument,\n an :class:`~ignite.engine.Engine` object,\n and return a score (`float`). Objects with highest scores will be retained.\n Exactly one of (`save_interval`, `score_function`) arguments must be provided.\n score_name (str, optional):\n if `score_function` not None, it is possible to store its absolute value using `score_name`. See Notes for\n more details.\n n_saved (int, optional):\n Number of objects that should be kept on disk. 
Older files will be removed.\n atomic (bool, optional):\n If True, objects are serialized to a temporary file,\n and then moved to final destination, so that files are\n guaranteed to not be damaged (for example if exception occures during saving).\n require_empty (bool, optional):\n If True, will raise exception if there are any files starting with `filename_prefix`\n in the directory 'dirname'.\n create_dir (bool, optional):\n If True, will create directory 'dirname' if it doesnt exist.\n save_as_state_dict (bool, optional):\n If True, will save only the `state_dict` of the objects specified, otherwise the whole object will be saved.\n\n Notes:\n This handler expects two arguments: an :class:`~ignite.engine.Engine` object and a `dict`\n mapping names to objects that should be saved.\n\n These names are used to specify filenames for saved objects.\n Each filename has the following structure:\n `{filename_prefix}_{name}_{step_number}.pth`.\n Here, `filename_prefix` is the argument passed to the constructor,\n `name` is the key in the aforementioned `dict`, and `step_number`\n is incremented by `1` with every call to the handler.\n\n If `score_function` is provided, user can store its absolute value using `score_name` in the filename.\n Each filename can have the following structure:\n `{filename_prefix}_{name}_{step_number}_{score_name}={abs(score_function_result)}.pth`.\n For example, `score_name=\"val_loss\"` and `score_function` that returns `-loss` (as objects with highest scores\n will be retained), then saved models filenames will be `model_resnet_10_val_loss=0.1234.pth`.\n\n Examples:\n >>> import os\n >>> from ignite.engine import Engine, Events\n >>> from ignite.handlers import ModelCheckpoint\n >>> from torch import nn\n >>> trainer = Engine(lambda batch: None)\n >>> handler = ModelCheckpoint('/tmp/models', 'myprefix', save_interval=2, n_saved=2, create_dir=True)\n >>> model = nn.Linear(3, 3)\n >>> trainer.add_event_handler(Events.EPOCH_COMPLETED, handler, {'mymodel': model})\n >>> trainer.run([0], max_epochs=6)\n >>> os.listdir('/tmp/models')\n ['myprefix_mymodel_4.pth', 'myprefix_mymodel_6.pth']\n \"\"\"\n\n def __init__(self, dirname, filename_prefix,\n save_interval=None, score_function=None, score_name=None,\n n_saved=1,\n atomic=True, require_empty=True,\n create_dir=True,\n save_as_state_dict=True):\n\n self._dirname = os.path.expanduser(dirname)\n self._fname_prefix = filename_prefix\n self._n_saved = n_saved\n self._save_interval = save_interval\n self._score_function = score_function\n self._score_name = score_name\n self._atomic = atomic\n self._saved = [] # list of tuples (priority, saved_objects)\n self._iteration = 0\n self._save_as_state_dict = save_as_state_dict\n\n if not (save_interval is None) ^ (score_function is None):\n raise ValueError(\"Exactly one of `save_interval`, or `score_function` \"\n \"arguments must be provided.\")\n\n if score_function is None and score_name is not None:\n raise ValueError(\"If `score_name` is provided, then `score_function` \"\n \"should be also provided.\")\n\n if create_dir:\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n # Ensure that dirname exists\n if not os.path.exists(dirname):\n raise ValueError(\"Directory path '{}' is not found.\".format(dirname))\n\n if require_empty:\n matched = [fname\n for fname in os.listdir(dirname)\n if fname.startswith(self._fname_prefix)]\n\n if len(matched) > 0:\n raise ValueError(\"Files prefixed with {} are already present \"\n \"in the directory {}. 
If you want to use this \"\n \"directory anyway, pass `require_empty=False`.\"\n \"\".format(filename_prefix, dirname))\n\n def _save(self, obj, path):\n if not self._atomic:\n self._internal_save(obj, path)\n else:\n tmp = tempfile.NamedTemporaryFile(delete=False, dir=self._dirname)\n try:\n self._internal_save(obj, tmp.file)\n except BaseException:\n tmp.close()\n os.remove(tmp.name)\n raise\n else:\n tmp.close()\n os.rename(tmp.name, path)\n\n def _internal_save(self, obj, path):\n if not self._save_as_state_dict:\n torch.save(obj, path)\n else:\n if not hasattr(obj, \"state_dict\") or not callable(obj.state_dict):\n raise ValueError(\"Object should have `state_dict` method.\")\n torch.save(obj.state_dict(), path)\n\n def __call__(self, engine, to_save):\n if len(to_save) == 0:\n raise RuntimeError(\"No objects to checkpoint found.\")\n\n self._iteration += 1\n\n if self._score_function is not None:\n priority = self._score_function(engine)\n\n else:\n priority = self._iteration\n if (self._iteration % self._save_interval) != 0:\n return\n\n if (len(self._saved) < self._n_saved) or (self._saved[0][0] < priority):\n saved_objs = []\n\n suffix = \"\"\n if self._score_name is not None:\n suffix = \"_{}={:.7}\".format(self._score_name, abs(priority))\n\n for name, obj in to_save.items():\n fname = '{}_{}_{}{}.pth'.format(self._fname_prefix, name, self._iteration, suffix)\n path = os.path.join(self._dirname, fname)\n\n self._save(obj=obj, path=path)\n saved_objs.append(path)\n\n self._saved.append((priority, saved_objs))\n self._saved.sort(key=lambda item: item[0])\n\n if len(self._saved) > self._n_saved:\n _, paths = self._saved.pop(0)\n for p in paths:\n os.remove(p)\n", "path": "ignite/handlers/checkpoint.py"}]} | 2,483 | 112 |
gh_patches_debug_31666 | rasdani/github-patches | git_diff | openai__gym-1379 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2 x # TODO: deprecation warning in registration.py
`# TODO: Add the following deprecation warning after 2017/02/18`
and
`# TODO: Add the following deprecation warning after 2017/03/01`
(both unactioned) appear in gym/gym/envs/registration.py
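For reference, enabling them would amount to something like the following (sketch only, mirroring the messages already sitting in the commented-out lines; note that `registration.py` only imports `re`, `importlib` and `gym`'s `error`/`logger`, so an `import warnings` would be needed as well):
```python
import warnings  # not currently imported by gym/envs/registration.py

# Roughly what the 2017/03/01 TODO describes, once uncommented:
timestep_limit = 200
warnings.warn(
    "register(timestep_limit={}) is deprecated. "
    "Use register(max_episode_steps={}) instead.".format(timestep_limit, timestep_limit)
)
```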
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/envs/registration.py`
Content:
```
1 import re
2 import importlib
3 from gym import error, logger
4
5 # This format is true today, but it's *not* an official spec.
6 # [username/](env-name)-v(version) env-name is group 1, version is group 2
7 #
8 # 2016-10-31: We're experimentally expanding the environment ID format
9 # to include an optional username.
10 env_id_re = re.compile(r'^(?:[\w:-]+\/)?([\w:.-]+)-v(\d+)$')
11
12
13 def load(name):
14 mod_name, attr_name = name.split(":")
15 mod = importlib.import_module(mod_name)
16 fn = getattr(mod, attr_name)
17 return fn
18
19
20 class EnvSpec(object):
21 """A specification for a particular instance of the environment. Used
22 to register the parameters for official evaluations.
23
24 Args:
25 id (str): The official environment ID
26 entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
27 trials (int): The number of trials to average reward over
28 reward_threshold (Optional[int]): The reward threshold before the task is considered solved
29 local_only: True if the environment is to be used only on the local machine (e.g. debugging envs)
30 kwargs (dict): The kwargs to pass to the environment class
31 nondeterministic (bool): Whether this environment is non-deterministic even after seeding
32 tags (dict[str:any]): A set of arbitrary key-value tags on this environment, including simple property=True tags
33
34 Attributes:
35 id (str): The official environment ID
36 trials (int): The number of trials run in official evaluation
37 """
38
39 def __init__(self, id, entry_point=None, trials=100, reward_threshold=None, local_only=False, kwargs=None, nondeterministic=False, tags=None, max_episode_steps=None, max_episode_seconds=None, timestep_limit=None):
40 self.id = id
41 # Evaluation parameters
42 self.trials = trials
43 self.reward_threshold = reward_threshold
44 # Environment properties
45 self.nondeterministic = nondeterministic
46
47 if tags is None:
48 tags = {}
49 self.tags = tags
50
51 # BACKWARDS COMPAT 2017/1/18
52 if tags.get('wrapper_config.TimeLimit.max_episode_steps'):
53 max_episode_steps = tags.get('wrapper_config.TimeLimit.max_episode_steps')
54 # TODO: Add the following deprecation warning after 2017/02/18
55 # warnings.warn("DEPRECATION WARNING wrapper_config.TimeLimit has been deprecated. Replace any calls to `register(tags={'wrapper_config.TimeLimit.max_episode_steps': 200)}` with `register(max_episode_steps=200)`. This change was made 2017/1/31 and is included in gym version 0.8.0. If you are getting many of these warnings, you may need to update universe past version 0.21.3")
56
57 tags['wrapper_config.TimeLimit.max_episode_steps'] = max_episode_steps
58 ######
59
60 # BACKWARDS COMPAT 2017/1/31
61 if timestep_limit is not None:
62 max_episode_steps = timestep_limit
63 # TODO: Add the following deprecation warning after 2017/03/01
64 # warnings.warn("register(timestep_limit={}) is deprecated. Use register(max_episode_steps={}) instead.".format(timestep_limit, timestep_limit))
65 ######
66
67 self.max_episode_steps = max_episode_steps
68 self.max_episode_seconds = max_episode_seconds
69
70 # We may make some of these other parameters public if they're
71 # useful.
72 match = env_id_re.search(id)
73 if not match:
74 raise error.Error('Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id, env_id_re.pattern))
75 self._env_name = match.group(1)
76 self._entry_point = entry_point
77 self._local_only = local_only
78 self._kwargs = {} if kwargs is None else kwargs
79
80 def make(self, **kwargs):
81 """Instantiates an instance of the environment with appropriate kwargs"""
82 if self._entry_point is None:
83 raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
84 _kwargs = self._kwargs.copy()
85 _kwargs.update(kwargs)
86 if callable(self._entry_point):
87 env = self._entry_point(**_kwargs)
88 else:
89 cls = load(self._entry_point)
90 env = cls(**_kwargs)
91
92 # Make the enviroment aware of which spec it came from.
93 env.unwrapped.spec = self
94
95 return env
96
97 def __repr__(self):
98 return "EnvSpec({})".format(self.id)
99
100 @property
101 def timestep_limit(self):
102 return self.max_episode_steps
103
104 @timestep_limit.setter
105 def timestep_limit(self, value):
106 self.max_episode_steps = value
107
108
109 class EnvRegistry(object):
110 """Register an env by ID. IDs remain stable over time and are
111 guaranteed to resolve to the same environment dynamics (or be
112 desupported). The goal is that results on a particular environment
113 should always be comparable, and not depend on the version of the
114 code that was running.
115 """
116
117 def __init__(self):
118 self.env_specs = {}
119
120 def make(self, path, **kwargs):
121 if len(kwargs) > 0:
122 logger.info('Making new env: %s (%s)', path, kwargs)
123 else:
124 logger.info('Making new env: %s', path)
125 spec = self.spec(path)
126 env = spec.make(**kwargs)
127 # We used to have people override _reset/_step rather than
128 # reset/step. Set _gym_disable_underscore_compat = True on
129 # your environment if you use these methods and don't want
130 # compatibility code to be invoked.
131 if hasattr(env, "_reset") and hasattr(env, "_step") and not getattr(env, "_gym_disable_underscore_compat", False):
132 patch_deprecated_methods(env)
133 if (env.spec.timestep_limit is not None) and not spec.tags.get('vnc'):
134 from gym.wrappers.time_limit import TimeLimit
135 env = TimeLimit(env,
136 max_episode_steps=env.spec.max_episode_steps,
137 max_episode_seconds=env.spec.max_episode_seconds)
138 return env
139
140
141 def all(self):
142 return self.env_specs.values()
143
144 def spec(self, path):
145 if ':' in path:
146 mod_name, _sep, id = path.partition(':')
147 try:
148 importlib.import_module(mod_name)
149 except ModuleNotFoundError:
150 raise error.Error('A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`'.format(mod_name))
151 else:
152 id = path
153
154 match = env_id_re.search(id)
155 if not match:
156 raise error.Error('Attempted to look up malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id.encode('utf-8'), env_id_re.pattern))
157
158 try:
159 return self.env_specs[id]
160 except KeyError:
161 # Parse the env name and check to see if it matches the non-version
162 # part of a valid env (could also check the exact number here)
163 env_name = match.group(1)
164 matching_envs = [valid_env_name for valid_env_name, valid_env_spec in self.env_specs.items()
165 if env_name == valid_env_spec._env_name]
166 if matching_envs:
167 raise error.DeprecatedEnv('Env {} not found (valid versions include {})'.format(id, matching_envs))
168 else:
169 raise error.UnregisteredEnv('No registered env with id: {}'.format(id))
170
171 def register(self, id, **kwargs):
172 if id in self.env_specs:
173 raise error.Error('Cannot re-register id: {}'.format(id))
174 self.env_specs[id] = EnvSpec(id, **kwargs)
175
176 # Have a global registry
177 registry = EnvRegistry()
178
179 def register(id, **kwargs):
180 return registry.register(id, **kwargs)
181
182 def make(id, **kwargs):
183 return registry.make(id, **kwargs)
184
185 def spec(id):
186 return registry.spec(id)
187
188 warn_once = True
189
190 def patch_deprecated_methods(env):
191 """
192 Methods renamed from '_method' to 'method', render() no longer has 'close' parameter, close is a separate method.
193 For backward compatibility, this makes it possible to work with unmodified environments.
194 """
195 global warn_once
196 if warn_once:
197 logger.warn("Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. Set _gym_disable_underscore_compat = True to disable this behavior." % str(type(env)))
198 warn_once = False
199 env.reset = env._reset
200 env.step = env._step
201 env.seed = env._seed
202 def render(mode):
203 return env._render(mode, close=False)
204 def close():
205 env._render("human", close=True)
206 env.render = render
207 env.close = close
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/envs/registration.py b/gym/envs/registration.py
--- a/gym/envs/registration.py
+++ b/gym/envs/registration.py
@@ -51,8 +51,7 @@
# BACKWARDS COMPAT 2017/1/18
if tags.get('wrapper_config.TimeLimit.max_episode_steps'):
max_episode_steps = tags.get('wrapper_config.TimeLimit.max_episode_steps')
- # TODO: Add the following deprecation warning after 2017/02/18
- # warnings.warn("DEPRECATION WARNING wrapper_config.TimeLimit has been deprecated. Replace any calls to `register(tags={'wrapper_config.TimeLimit.max_episode_steps': 200)}` with `register(max_episode_steps=200)`. This change was made 2017/1/31 and is included in gym version 0.8.0. If you are getting many of these warnings, you may need to update universe past version 0.21.3")
+ warnings.warn("DEPRECATION WARNING wrapper_config.TimeLimit has been deprecated. Replace any calls to `register(tags={'wrapper_config.TimeLimit.max_episode_steps': 200)}` with `register(max_episode_steps=200)`. This change was made 2017/1/31 and is included in gym version 0.8.0. If you are getting many of these warnings, you may need to update switch from universe 0.21.3 to retro (https://github.com/openai/retro)")
tags['wrapper_config.TimeLimit.max_episode_steps'] = max_episode_steps
######
@@ -60,9 +59,7 @@
# BACKWARDS COMPAT 2017/1/31
if timestep_limit is not None:
max_episode_steps = timestep_limit
- # TODO: Add the following deprecation warning after 2017/03/01
- # warnings.warn("register(timestep_limit={}) is deprecated. Use register(max_episode_steps={}) instead.".format(timestep_limit, timestep_limit))
- ######
+ warnings.warn("register(timestep_limit={}) is deprecated. Use register(max_episode_steps={}) instead.".format(timestep_limit, timestep_limit))
self.max_episode_steps = max_episode_steps
self.max_episode_seconds = max_episode_seconds
| {"golden_diff": "diff --git a/gym/envs/registration.py b/gym/envs/registration.py\n--- a/gym/envs/registration.py\n+++ b/gym/envs/registration.py\n@@ -51,8 +51,7 @@\n # BACKWARDS COMPAT 2017/1/18\n if tags.get('wrapper_config.TimeLimit.max_episode_steps'):\n max_episode_steps = tags.get('wrapper_config.TimeLimit.max_episode_steps')\n- # TODO: Add the following deprecation warning after 2017/02/18\n- # warnings.warn(\"DEPRECATION WARNING wrapper_config.TimeLimit has been deprecated. Replace any calls to `register(tags={'wrapper_config.TimeLimit.max_episode_steps': 200)}` with `register(max_episode_steps=200)`. This change was made 2017/1/31 and is included in gym version 0.8.0. If you are getting many of these warnings, you may need to update universe past version 0.21.3\")\n+ warnings.warn(\"DEPRECATION WARNING wrapper_config.TimeLimit has been deprecated. Replace any calls to `register(tags={'wrapper_config.TimeLimit.max_episode_steps': 200)}` with `register(max_episode_steps=200)`. This change was made 2017/1/31 and is included in gym version 0.8.0. If you are getting many of these warnings, you may need to update switch from universe 0.21.3 to retro (https://github.com/openai/retro)\")\n \n tags['wrapper_config.TimeLimit.max_episode_steps'] = max_episode_steps\n ######\n@@ -60,9 +59,7 @@\n # BACKWARDS COMPAT 2017/1/31\n if timestep_limit is not None:\n max_episode_steps = timestep_limit\n- # TODO: Add the following deprecation warning after 2017/03/01\n- # warnings.warn(\"register(timestep_limit={}) is deprecated. Use register(max_episode_steps={}) instead.\".format(timestep_limit, timestep_limit))\n- ######\n+ warnings.warn(\"register(timestep_limit={}) is deprecated. Use register(max_episode_steps={}) instead.\".format(timestep_limit, timestep_limit))\n \n self.max_episode_steps = max_episode_steps\n self.max_episode_seconds = max_episode_seconds\n", "issue": "2 x # TODO: depreciation warning in registration.py\n`# TODO: Add the following deprecation warning after 2017/02/18`\r\nand\r\n`# TODO: Add the following deprecation warning after 2017/03/01`\r\n(both unactioned) appear in gym/gym/envs/registration.py\n", "before_files": [{"content": "import re\nimport importlib\nfrom gym import error, logger\n\n# This format is true today, but it's *not* an official spec.\n# [username/](env-name)-v(version) env-name is group 1, version is group 2\n#\n# 2016-10-31: We're experimentally expanding the environment ID format\n# to include an optional username.\nenv_id_re = re.compile(r'^(?:[\\w:-]+\\/)?([\\w:.-]+)-v(\\d+)$')\n\n\ndef load(name):\n mod_name, attr_name = name.split(\":\")\n mod = importlib.import_module(mod_name)\n fn = getattr(mod, attr_name)\n return fn\n\n\nclass EnvSpec(object):\n \"\"\"A specification for a particular instance of the environment. Used\n to register the parameters for official evaluations.\n\n Args:\n id (str): The official environment ID\n entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)\n trials (int): The number of trials to average reward over\n reward_threshold (Optional[int]): The reward threshold before the task is considered solved\n local_only: True if the environment is to be used only on the local machine (e.g. 
debugging envs)\n kwargs (dict): The kwargs to pass to the environment class\n nondeterministic (bool): Whether this environment is non-deterministic even after seeding\n tags (dict[str:any]): A set of arbitrary key-value tags on this environment, including simple property=True tags\n\n Attributes:\n id (str): The official environment ID\n trials (int): The number of trials run in official evaluation\n \"\"\"\n\n def __init__(self, id, entry_point=None, trials=100, reward_threshold=None, local_only=False, kwargs=None, nondeterministic=False, tags=None, max_episode_steps=None, max_episode_seconds=None, timestep_limit=None):\n self.id = id\n # Evaluation parameters\n self.trials = trials\n self.reward_threshold = reward_threshold\n # Environment properties\n self.nondeterministic = nondeterministic\n\n if tags is None:\n tags = {}\n self.tags = tags\n\n # BACKWARDS COMPAT 2017/1/18\n if tags.get('wrapper_config.TimeLimit.max_episode_steps'):\n max_episode_steps = tags.get('wrapper_config.TimeLimit.max_episode_steps')\n # TODO: Add the following deprecation warning after 2017/02/18\n # warnings.warn(\"DEPRECATION WARNING wrapper_config.TimeLimit has been deprecated. Replace any calls to `register(tags={'wrapper_config.TimeLimit.max_episode_steps': 200)}` with `register(max_episode_steps=200)`. This change was made 2017/1/31 and is included in gym version 0.8.0. If you are getting many of these warnings, you may need to update universe past version 0.21.3\")\n\n tags['wrapper_config.TimeLimit.max_episode_steps'] = max_episode_steps\n ######\n\n # BACKWARDS COMPAT 2017/1/31\n if timestep_limit is not None:\n max_episode_steps = timestep_limit\n # TODO: Add the following deprecation warning after 2017/03/01\n # warnings.warn(\"register(timestep_limit={}) is deprecated. Use register(max_episode_steps={}) instead.\".format(timestep_limit, timestep_limit))\n ######\n\n self.max_episode_steps = max_episode_steps\n self.max_episode_seconds = max_episode_seconds\n\n # We may make some of these other parameters public if they're\n # useful.\n match = env_id_re.search(id)\n if not match:\n raise error.Error('Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id, env_id_re.pattern))\n self._env_name = match.group(1)\n self._entry_point = entry_point\n self._local_only = local_only\n self._kwargs = {} if kwargs is None else kwargs\n\n def make(self, **kwargs):\n \"\"\"Instantiates an instance of the environment with appropriate kwargs\"\"\"\n if self._entry_point is None:\n raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))\n _kwargs = self._kwargs.copy()\n _kwargs.update(kwargs)\n if callable(self._entry_point):\n env = self._entry_point(**_kwargs)\n else:\n cls = load(self._entry_point)\n env = cls(**_kwargs)\n\n # Make the enviroment aware of which spec it came from.\n env.unwrapped.spec = self\n\n return env\n\n def __repr__(self):\n return \"EnvSpec({})\".format(self.id)\n\n @property\n def timestep_limit(self):\n return self.max_episode_steps\n\n @timestep_limit.setter\n def timestep_limit(self, value):\n self.max_episode_steps = value\n\n\nclass EnvRegistry(object):\n \"\"\"Register an env by ID. IDs remain stable over time and are\n guaranteed to resolve to the same environment dynamics (or be\n desupported). 
The goal is that results on a particular environment\n should always be comparable, and not depend on the version of the\n code that was running.\n \"\"\"\n\n def __init__(self):\n self.env_specs = {}\n\n def make(self, path, **kwargs):\n if len(kwargs) > 0:\n logger.info('Making new env: %s (%s)', path, kwargs)\n else:\n logger.info('Making new env: %s', path)\n spec = self.spec(path)\n env = spec.make(**kwargs)\n # We used to have people override _reset/_step rather than\n # reset/step. Set _gym_disable_underscore_compat = True on\n # your environment if you use these methods and don't want\n # compatibility code to be invoked.\n if hasattr(env, \"_reset\") and hasattr(env, \"_step\") and not getattr(env, \"_gym_disable_underscore_compat\", False):\n patch_deprecated_methods(env)\n if (env.spec.timestep_limit is not None) and not spec.tags.get('vnc'):\n from gym.wrappers.time_limit import TimeLimit\n env = TimeLimit(env,\n max_episode_steps=env.spec.max_episode_steps,\n max_episode_seconds=env.spec.max_episode_seconds)\n return env\n\n\n def all(self):\n return self.env_specs.values()\n\n def spec(self, path):\n if ':' in path:\n mod_name, _sep, id = path.partition(':')\n try:\n importlib.import_module(mod_name)\n except ModuleNotFoundError:\n raise error.Error('A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`'.format(mod_name))\n else:\n id = path\n\n match = env_id_re.search(id)\n if not match:\n raise error.Error('Attempted to look up malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id.encode('utf-8'), env_id_re.pattern))\n\n try:\n return self.env_specs[id]\n except KeyError:\n # Parse the env name and check to see if it matches the non-version\n # part of a valid env (could also check the exact number here)\n env_name = match.group(1)\n matching_envs = [valid_env_name for valid_env_name, valid_env_spec in self.env_specs.items()\n if env_name == valid_env_spec._env_name]\n if matching_envs:\n raise error.DeprecatedEnv('Env {} not found (valid versions include {})'.format(id, matching_envs))\n else:\n raise error.UnregisteredEnv('No registered env with id: {}'.format(id))\n\n def register(self, id, **kwargs):\n if id in self.env_specs:\n raise error.Error('Cannot re-register id: {}'.format(id))\n self.env_specs[id] = EnvSpec(id, **kwargs)\n\n# Have a global registry\nregistry = EnvRegistry()\n\ndef register(id, **kwargs):\n return registry.register(id, **kwargs)\n\ndef make(id, **kwargs):\n return registry.make(id, **kwargs)\n\ndef spec(id):\n return registry.spec(id)\n\nwarn_once = True\n\ndef patch_deprecated_methods(env):\n \"\"\"\n Methods renamed from '_method' to 'method', render() no longer has 'close' parameter, close is a separate method.\n For backward compatibility, this makes it possible to work with unmodified environments.\n \"\"\"\n global warn_once\n if warn_once:\n logger.warn(\"Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. 
Set _gym_disable_underscore_compat = True to disable this behavior.\" % str(type(env)))\n warn_once = False\n env.reset = env._reset\n env.step = env._step\n env.seed = env._seed\n def render(mode):\n return env._render(mode, close=False)\n def close():\n env._render(\"human\", close=True)\n env.render = render\n env.close = close\n", "path": "gym/envs/registration.py"}], "after_files": [{"content": "import re\nimport importlib\nfrom gym import error, logger\n\n# This format is true today, but it's *not* an official spec.\n# [username/](env-name)-v(version) env-name is group 1, version is group 2\n#\n# 2016-10-31: We're experimentally expanding the environment ID format\n# to include an optional username.\nenv_id_re = re.compile(r'^(?:[\\w:-]+\\/)?([\\w:.-]+)-v(\\d+)$')\n\n\ndef load(name):\n mod_name, attr_name = name.split(\":\")\n mod = importlib.import_module(mod_name)\n fn = getattr(mod, attr_name)\n return fn\n\n\nclass EnvSpec(object):\n \"\"\"A specification for a particular instance of the environment. Used\n to register the parameters for official evaluations.\n\n Args:\n id (str): The official environment ID\n entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)\n trials (int): The number of trials to average reward over\n reward_threshold (Optional[int]): The reward threshold before the task is considered solved\n local_only: True if the environment is to be used only on the local machine (e.g. debugging envs)\n kwargs (dict): The kwargs to pass to the environment class\n nondeterministic (bool): Whether this environment is non-deterministic even after seeding\n tags (dict[str:any]): A set of arbitrary key-value tags on this environment, including simple property=True tags\n\n Attributes:\n id (str): The official environment ID\n trials (int): The number of trials run in official evaluation\n \"\"\"\n\n def __init__(self, id, entry_point=None, trials=100, reward_threshold=None, local_only=False, kwargs=None, nondeterministic=False, tags=None, max_episode_steps=None, max_episode_seconds=None, timestep_limit=None):\n self.id = id\n # Evaluation parameters\n self.trials = trials\n self.reward_threshold = reward_threshold\n # Environment properties\n self.nondeterministic = nondeterministic\n\n if tags is None:\n tags = {}\n self.tags = tags\n\n # BACKWARDS COMPAT 2017/1/18\n if tags.get('wrapper_config.TimeLimit.max_episode_steps'):\n max_episode_steps = tags.get('wrapper_config.TimeLimit.max_episode_steps')\n warnings.warn(\"DEPRECATION WARNING wrapper_config.TimeLimit has been deprecated. Replace any calls to `register(tags={'wrapper_config.TimeLimit.max_episode_steps': 200)}` with `register(max_episode_steps=200)`. This change was made 2017/1/31 and is included in gym version 0.8.0. If you are getting many of these warnings, you may need to update switch from universe 0.21.3 to retro (https://github.com/openai/retro)\")\n\n tags['wrapper_config.TimeLimit.max_episode_steps'] = max_episode_steps\n ######\n\n # BACKWARDS COMPAT 2017/1/31\n if timestep_limit is not None:\n max_episode_steps = timestep_limit\n warnings.warn(\"register(timestep_limit={}) is deprecated. 
Use register(max_episode_steps={}) instead.\".format(timestep_limit, timestep_limit))\n\n self.max_episode_steps = max_episode_steps\n self.max_episode_seconds = max_episode_seconds\n\n # We may make some of these other parameters public if they're\n # useful.\n match = env_id_re.search(id)\n if not match:\n raise error.Error('Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id, env_id_re.pattern))\n self._env_name = match.group(1)\n self._entry_point = entry_point\n self._local_only = local_only\n self._kwargs = {} if kwargs is None else kwargs\n\n def make(self, **kwargs):\n \"\"\"Instantiates an instance of the environment with appropriate kwargs\"\"\"\n if self._entry_point is None:\n raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))\n _kwargs = self._kwargs.copy()\n _kwargs.update(kwargs)\n if callable(self._entry_point):\n env = self._entry_point(**_kwargs)\n else:\n cls = load(self._entry_point)\n env = cls(**_kwargs)\n\n # Make the enviroment aware of which spec it came from.\n env.unwrapped.spec = self\n\n return env\n\n def __repr__(self):\n return \"EnvSpec({})\".format(self.id)\n\n @property\n def timestep_limit(self):\n return self.max_episode_steps\n\n @timestep_limit.setter\n def timestep_limit(self, value):\n self.max_episode_steps = value\n\n\nclass EnvRegistry(object):\n \"\"\"Register an env by ID. IDs remain stable over time and are\n guaranteed to resolve to the same environment dynamics (or be\n desupported). The goal is that results on a particular environment\n should always be comparable, and not depend on the version of the\n code that was running.\n \"\"\"\n\n def __init__(self):\n self.env_specs = {}\n\n def make(self, path, **kwargs):\n if len(kwargs) > 0:\n logger.info('Making new env: %s (%s)', path, kwargs)\n else:\n logger.info('Making new env: %s', path)\n spec = self.spec(path)\n env = spec.make(**kwargs)\n # We used to have people override _reset/_step rather than\n # reset/step. Set _gym_disable_underscore_compat = True on\n # your environment if you use these methods and don't want\n # compatibility code to be invoked.\n if hasattr(env, \"_reset\") and hasattr(env, \"_step\") and not getattr(env, \"_gym_disable_underscore_compat\", False):\n patch_deprecated_methods(env)\n if (env.spec.timestep_limit is not None) and not spec.tags.get('vnc'):\n from gym.wrappers.time_limit import TimeLimit\n env = TimeLimit(env,\n max_episode_steps=env.spec.max_episode_steps,\n max_episode_seconds=env.spec.max_episode_seconds)\n return env\n\n\n def all(self):\n return self.env_specs.values()\n\n def spec(self, path):\n if ':' in path:\n mod_name, _sep, id = path.partition(':')\n try:\n importlib.import_module(mod_name)\n except ModuleNotFoundError:\n raise error.Error('A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`'.format(mod_name))\n else:\n id = path\n\n match = env_id_re.search(id)\n if not match:\n raise error.Error('Attempted to look up malformed environment ID: {}. 
(Currently all IDs must be of the form {}.)'.format(id.encode('utf-8'), env_id_re.pattern))\n\n try:\n return self.env_specs[id]\n except KeyError:\n # Parse the env name and check to see if it matches the non-version\n # part of a valid env (could also check the exact number here)\n env_name = match.group(1)\n matching_envs = [valid_env_name for valid_env_name, valid_env_spec in self.env_specs.items()\n if env_name == valid_env_spec._env_name]\n if matching_envs:\n raise error.DeprecatedEnv('Env {} not found (valid versions include {})'.format(id, matching_envs))\n else:\n raise error.UnregisteredEnv('No registered env with id: {}'.format(id))\n\n def register(self, id, **kwargs):\n if id in self.env_specs:\n raise error.Error('Cannot re-register id: {}'.format(id))\n self.env_specs[id] = EnvSpec(id, **kwargs)\n\n# Have a global registry\nregistry = EnvRegistry()\n\ndef register(id, **kwargs):\n return registry.register(id, **kwargs)\n\ndef make(id, **kwargs):\n return registry.make(id, **kwargs)\n\ndef spec(id):\n return registry.spec(id)\n\nwarn_once = True\n\ndef patch_deprecated_methods(env):\n \"\"\"\n Methods renamed from '_method' to 'method', render() no longer has 'close' parameter, close is a separate method.\n For backward compatibility, this makes it possible to work with unmodified environments.\n \"\"\"\n global warn_once\n if warn_once:\n logger.warn(\"Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. Set _gym_disable_underscore_compat = True to disable this behavior.\" % str(type(env)))\n warn_once = False\n env.reset = env._reset\n env.step = env._step\n env.seed = env._seed\n def render(mode):\n return env._render(mode, close=False)\n def close():\n env._render(\"human\", close=True)\n env.render = render\n env.close = close\n", "path": "gym/envs/registration.py"}]} | 2,868 | 518 |
gh_patches_debug_40713 | rasdani/github-patches | git_diff | translate__pootle-3806 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document the dump management command
`dump` looks like it could be useful for development. But I'm not 100% sure what data I'm looking at when I execute it. @ta2-1 I suspect this one might be yours. Let me know how it works and I'll happily document it. Or the other option is to remove it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_app/management/commands/dump.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10
11 import os
12
13 import sys
14 reload(sys)
15 sys.setdefaultencoding('utf-8')
16
17 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
18
19 from optparse import make_option
20
21 from pootle.core.utils.json import jsonify
22 from pootle_app.management.commands import PootleCommand
23 from pootle_app.models import Directory
24 from pootle_project.models import Project
25
26 DUMPED = {
27 'TranslationProject': ('pootle_path', 'real_path', 'disabled'),
28 'Store': ('file', 'translation_project', 'pootle_path', 'name', 'state'),
29 'Directory': ('name', 'parent', 'pootle_path'),
30 'Unit': ('source', 'target', 'source_wordcount', 'target_wordcount',
31 'developer_comment', 'translator_comment', 'locations',
32 'isobsolete', 'isfuzzy', 'istranslated'),
33 'Suggestion': ('target_f', 'user_id'),
34 'Language': ('code', 'fullname', 'pootle_path'),
35 'Project': ('code', 'fullname', 'checkstyle', 'localfiletype',
36 'treestyle', 'source_language', 'ignoredfiles',
37 'screenshot_search_prefix', 'disabled')
38 }
39
40
41 class Command(PootleCommand):
42 help = "Dump data."
43
44 shared_option_list = (
45 make_option('--stats', action='store_true', dest='stats',
46 help='Dump stats'),
47 make_option('--data', action='store_true', dest='data',
48 help='Data all data'),
49 make_option('--stop-level', action='store', dest='stop_level',
50 default=-1),
51 )
52 option_list = PootleCommand.option_list + shared_option_list
53
54 def handle_all(self, **options):
55 if not self.projects and not self.languages:
56 stats = options.get('stats', False)
57 data = options.get('data', False)
58 stop_level = int(options.get('stop_level', -1))
59 if stats:
60 self.dump_stats(stop_level=stop_level)
61 if data:
62 self.dump_all(stop_level=stop_level)
63 else:
64 super(Command, self).handle_all(**options)
65
66 def handle_translation_project(self, tp, **options):
67 stats = options.get('stats', False)
68 data = options.get('data', False)
69 stop_level = int(options.get('stop_level', -1))
70 if stats:
71 res = {}
72 self._dump_stats(tp.directory, res, stop_level=stop_level)
73
74 stats_dump = jsonify(res)
75 self.stdout.write(stats_dump)
76 if data:
77 self._dump_item(tp.directory, 0, stop_level=stop_level)
78
79 def dump_stats(self, stop_level):
80 res = {}
81 for prj in Project.objects.all():
82 self._dump_stats(prj, res, stop_level=stop_level)
83
84 for key, item in res.items():
85 out = u"%s %s,%s,%s,%s,%s,%s,%s,%s" % \
86 (key, item['total'], item['translated'], item['fuzzy'],
87 item['suggestions'], item['critical'], item['is_dirty'],
88 item['lastaction']['id'], item['lastupdated']['id'])
89
90 self.stdout.write(out)
91
92 def _dump_stats(self, item, res, stop_level):
93 key = item.get_cachekey()
94 item.initialize_children()
95
96 if stop_level != 0 and item.children:
97 if stop_level > 0:
98 stop_level = stop_level - 1
99 for child in item.children:
100 self._dump_stats(child, res,
101 stop_level=stop_level)
102
103 res[key] = (item.get_stats(include_children=False))
104
105 def dump_all(self, stop_level):
106 root = Directory.objects.root
107 self._dump_item(root, 0, stop_level=stop_level)
108
109 def _dump_item(self, item, level, stop_level):
110 self.stdout.write(self.dumped(item))
111 if item.is_dir:
112 # item is a Directory
113 if item.is_project():
114 self.stdout.write(self.dumped(item.project))
115 elif item.is_language():
116 self.stdout.write(self.dumped(item.language))
117 elif item.is_translationproject():
118 try:
119 self.stdout.write(self.dumped(item.translationproject))
120 except:
121 pass
122 else:
123 # item should be a Store
124 for unit in item.units:
125 self.stdout.write(self.dumped(unit))
126 for sg in unit.get_suggestions():
127 self.stdout.write(self.dumped(sg))
128
129 if stop_level != level:
130 item.initialize_children()
131 if item.children:
132 for child in item.children:
133 self._dump_item(child, level + 1, stop_level=stop_level)
134
135 def dumped(self, item):
136 def get_param(param):
137 p = getattr(item, param)
138 res = p() if callable(p) else p
139 res = u"%s" % res
140 res = res.replace('\n', '\\n')
141 return (param, res)
142
143 return u"%d:%s\t%s" % \
144 (
145 item.id,
146 item._meta.object_name,
147 "\t".join(
148 u"%s=%s" % (k, v)
149 for k, v in map(get_param, DUMPED[item._meta.object_name])
150 )
151 )
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_app/management/commands/dump.py b/pootle/apps/pootle_app/management/commands/dump.py
--- a/pootle/apps/pootle_app/management/commands/dump.py
+++ b/pootle/apps/pootle_app/management/commands/dump.py
@@ -18,7 +18,8 @@
from optparse import make_option
-from pootle.core.utils.json import jsonify
+from django.core.management.base import CommandError
+
from pootle_app.management.commands import PootleCommand
from pootle_app.models import Directory
from pootle_project.models import Project
@@ -56,10 +57,15 @@
stats = options.get('stats', False)
data = options.get('data', False)
stop_level = int(options.get('stop_level', -1))
+
if stats:
self.dump_stats(stop_level=stop_level)
+ return
if data:
self.dump_all(stop_level=stop_level)
+ return
+
+ raise CommandError("Set --data or --stats option.")
else:
super(Command, self).handle_all(**options)
@@ -70,25 +76,19 @@
if stats:
res = {}
self._dump_stats(tp.directory, res, stop_level=stop_level)
+ return
- stats_dump = jsonify(res)
- self.stdout.write(stats_dump)
if data:
self._dump_item(tp.directory, 0, stop_level=stop_level)
+ return
+
+ raise CommandError("Set --data or --stats option.")
def dump_stats(self, stop_level):
res = {}
for prj in Project.objects.all():
self._dump_stats(prj, res, stop_level=stop_level)
- for key, item in res.items():
- out = u"%s %s,%s,%s,%s,%s,%s,%s,%s" % \
- (key, item['total'], item['translated'], item['fuzzy'],
- item['suggestions'], item['critical'], item['is_dirty'],
- item['lastaction']['id'], item['lastupdated']['id'])
-
- self.stdout.write(out)
-
def _dump_stats(self, item, res, stop_level):
key = item.get_cachekey()
item.initialize_children()
@@ -102,6 +102,24 @@
res[key] = (item.get_stats(include_children=False))
+ if res[key]['lastaction']:
+ last_action_id = res[key]['lastaction']['id']
+ else:
+ last_action_id = None
+
+ if res[key]['lastupdated']:
+ last_updated_id = res[key]['lastupdated']['id']
+ else:
+ last_updated_id = None
+
+ out = u"%s %s,%s,%s,%s,%s,%s,%s,%s" % \
+ (key, res[key]['total'], res[key]['translated'],
+ res[key]['fuzzy'], res[key]['suggestions'],
+ res[key]['critical'], res[key]['is_dirty'],
+ last_action_id, last_updated_id)
+
+ self.stdout.write(out)
+
def dump_all(self, stop_level):
root = Directory.objects.root
self._dump_item(root, 0, stop_level=stop_level)
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/dump.py b/pootle/apps/pootle_app/management/commands/dump.py\n--- a/pootle/apps/pootle_app/management/commands/dump.py\n+++ b/pootle/apps/pootle_app/management/commands/dump.py\n@@ -18,7 +18,8 @@\n \n from optparse import make_option\n \n-from pootle.core.utils.json import jsonify\n+from django.core.management.base import CommandError\n+\n from pootle_app.management.commands import PootleCommand\n from pootle_app.models import Directory\n from pootle_project.models import Project\n@@ -56,10 +57,15 @@\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n+\n if stats:\n self.dump_stats(stop_level=stop_level)\n+ return\n if data:\n self.dump_all(stop_level=stop_level)\n+ return\n+\n+ raise CommandError(\"Set --data or --stats option.\")\n else:\n super(Command, self).handle_all(**options)\n \n@@ -70,25 +76,19 @@\n if stats:\n res = {}\n self._dump_stats(tp.directory, res, stop_level=stop_level)\n+ return\n \n- stats_dump = jsonify(res)\n- self.stdout.write(stats_dump)\n if data:\n self._dump_item(tp.directory, 0, stop_level=stop_level)\n+ return\n+\n+ raise CommandError(\"Set --data or --stats option.\")\n \n def dump_stats(self, stop_level):\n res = {}\n for prj in Project.objects.all():\n self._dump_stats(prj, res, stop_level=stop_level)\n \n- for key, item in res.items():\n- out = u\"%s %s,%s,%s,%s,%s,%s,%s,%s\" % \\\n- (key, item['total'], item['translated'], item['fuzzy'],\n- item['suggestions'], item['critical'], item['is_dirty'],\n- item['lastaction']['id'], item['lastupdated']['id'])\n-\n- self.stdout.write(out)\n-\n def _dump_stats(self, item, res, stop_level):\n key = item.get_cachekey()\n item.initialize_children()\n@@ -102,6 +102,24 @@\n \n res[key] = (item.get_stats(include_children=False))\n \n+ if res[key]['lastaction']:\n+ last_action_id = res[key]['lastaction']['id']\n+ else:\n+ last_action_id = None\n+\n+ if res[key]['lastupdated']:\n+ last_updated_id = res[key]['lastupdated']['id']\n+ else:\n+ last_updated_id = None\n+\n+ out = u\"%s %s,%s,%s,%s,%s,%s,%s,%s\" % \\\n+ (key, res[key]['total'], res[key]['translated'],\n+ res[key]['fuzzy'], res[key]['suggestions'],\n+ res[key]['critical'], res[key]['is_dirty'],\n+ last_action_id, last_updated_id)\n+\n+ self.stdout.write(out)\n+\n def dump_all(self, stop_level):\n root = Directory.objects.root\n self._dump_item(root, 0, stop_level=stop_level)\n", "issue": "Document the dump management command\n`dump` looks like it could be useful for development. But I'm not 100% sure what data I'm looking at when I execute it. @ta2-1 I suspect this one might be yours. Let me know how it works and I'll happily document it. Or the other option is to remove it.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\nimport os\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom optparse import make_option\n\nfrom pootle.core.utils.json import jsonify\nfrom pootle_app.management.commands import PootleCommand\nfrom pootle_app.models import Directory\nfrom pootle_project.models import Project\n\nDUMPED = {\n 'TranslationProject': ('pootle_path', 'real_path', 'disabled'),\n 'Store': ('file', 'translation_project', 'pootle_path', 'name', 'state'),\n 'Directory': ('name', 'parent', 'pootle_path'),\n 'Unit': ('source', 'target', 'source_wordcount', 'target_wordcount',\n 'developer_comment', 'translator_comment', 'locations',\n 'isobsolete', 'isfuzzy', 'istranslated'),\n 'Suggestion': ('target_f', 'user_id'),\n 'Language': ('code', 'fullname', 'pootle_path'),\n 'Project': ('code', 'fullname', 'checkstyle', 'localfiletype',\n 'treestyle', 'source_language', 'ignoredfiles',\n 'screenshot_search_prefix', 'disabled')\n}\n\n\nclass Command(PootleCommand):\n help = \"Dump data.\"\n\n shared_option_list = (\n make_option('--stats', action='store_true', dest='stats',\n help='Dump stats'),\n make_option('--data', action='store_true', dest='data',\n help='Data all data'),\n make_option('--stop-level', action='store', dest='stop_level',\n default=-1),\n )\n option_list = PootleCommand.option_list + shared_option_list\n\n def handle_all(self, **options):\n if not self.projects and not self.languages:\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n if stats:\n self.dump_stats(stop_level=stop_level)\n if data:\n self.dump_all(stop_level=stop_level)\n else:\n super(Command, self).handle_all(**options)\n\n def handle_translation_project(self, tp, **options):\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n if stats:\n res = {}\n self._dump_stats(tp.directory, res, stop_level=stop_level)\n\n stats_dump = jsonify(res)\n self.stdout.write(stats_dump)\n if data:\n self._dump_item(tp.directory, 0, stop_level=stop_level)\n\n def dump_stats(self, stop_level):\n res = {}\n for prj in Project.objects.all():\n self._dump_stats(prj, res, stop_level=stop_level)\n\n for key, item in res.items():\n out = u\"%s %s,%s,%s,%s,%s,%s,%s,%s\" % \\\n (key, item['total'], item['translated'], item['fuzzy'],\n item['suggestions'], item['critical'], item['is_dirty'],\n item['lastaction']['id'], item['lastupdated']['id'])\n\n self.stdout.write(out)\n\n def _dump_stats(self, item, res, stop_level):\n key = item.get_cachekey()\n item.initialize_children()\n\n if stop_level != 0 and item.children:\n if stop_level > 0:\n stop_level = stop_level - 1\n for child in item.children:\n self._dump_stats(child, res,\n stop_level=stop_level)\n\n res[key] = (item.get_stats(include_children=False))\n\n def dump_all(self, stop_level):\n root = Directory.objects.root\n self._dump_item(root, 0, stop_level=stop_level)\n\n def _dump_item(self, item, level, stop_level):\n self.stdout.write(self.dumped(item))\n if item.is_dir:\n # item is a Directory\n if item.is_project():\n self.stdout.write(self.dumped(item.project))\n elif item.is_language():\n self.stdout.write(self.dumped(item.language))\n elif item.is_translationproject():\n try:\n self.stdout.write(self.dumped(item.translationproject))\n except:\n pass\n else:\n # item 
should be a Store\n for unit in item.units:\n self.stdout.write(self.dumped(unit))\n for sg in unit.get_suggestions():\n self.stdout.write(self.dumped(sg))\n\n if stop_level != level:\n item.initialize_children()\n if item.children:\n for child in item.children:\n self._dump_item(child, level + 1, stop_level=stop_level)\n\n def dumped(self, item):\n def get_param(param):\n p = getattr(item, param)\n res = p() if callable(p) else p\n res = u\"%s\" % res\n res = res.replace('\\n', '\\\\n')\n return (param, res)\n\n return u\"%d:%s\\t%s\" % \\\n (\n item.id,\n item._meta.object_name,\n \"\\t\".join(\n u\"%s=%s\" % (k, v)\n for k, v in map(get_param, DUMPED[item._meta.object_name])\n )\n )\n", "path": "pootle/apps/pootle_app/management/commands/dump.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\nimport os\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom optparse import make_option\n\nfrom django.core.management.base import CommandError\n\nfrom pootle_app.management.commands import PootleCommand\nfrom pootle_app.models import Directory\nfrom pootle_project.models import Project\n\nDUMPED = {\n 'TranslationProject': ('pootle_path', 'real_path', 'disabled'),\n 'Store': ('file', 'translation_project', 'pootle_path', 'name', 'state'),\n 'Directory': ('name', 'parent', 'pootle_path'),\n 'Unit': ('source', 'target', 'source_wordcount', 'target_wordcount',\n 'developer_comment', 'translator_comment', 'locations',\n 'isobsolete', 'isfuzzy', 'istranslated'),\n 'Suggestion': ('target_f', 'user_id'),\n 'Language': ('code', 'fullname', 'pootle_path'),\n 'Project': ('code', 'fullname', 'checkstyle', 'localfiletype',\n 'treestyle', 'source_language', 'ignoredfiles',\n 'screenshot_search_prefix', 'disabled')\n}\n\n\nclass Command(PootleCommand):\n help = \"Dump data.\"\n\n shared_option_list = (\n make_option('--stats', action='store_true', dest='stats',\n help='Dump stats'),\n make_option('--data', action='store_true', dest='data',\n help='Data all data'),\n make_option('--stop-level', action='store', dest='stop_level',\n default=-1),\n )\n option_list = PootleCommand.option_list + shared_option_list\n\n def handle_all(self, **options):\n if not self.projects and not self.languages:\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n\n if stats:\n self.dump_stats(stop_level=stop_level)\n return\n if data:\n self.dump_all(stop_level=stop_level)\n return\n\n raise CommandError(\"Set --data or --stats option.\")\n else:\n super(Command, self).handle_all(**options)\n\n def handle_translation_project(self, tp, **options):\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n if stats:\n res = {}\n self._dump_stats(tp.directory, res, stop_level=stop_level)\n return\n\n if data:\n self._dump_item(tp.directory, 0, stop_level=stop_level)\n return\n\n raise CommandError(\"Set --data or --stats option.\")\n\n def dump_stats(self, stop_level):\n res = {}\n for prj in Project.objects.all():\n self._dump_stats(prj, res, stop_level=stop_level)\n\n def _dump_stats(self, item, res, stop_level):\n key = 
item.get_cachekey()\n item.initialize_children()\n\n if stop_level != 0 and item.children:\n if stop_level > 0:\n stop_level = stop_level - 1\n for child in item.children:\n self._dump_stats(child, res,\n stop_level=stop_level)\n\n res[key] = (item.get_stats(include_children=False))\n\n if res[key]['lastaction']:\n last_action_id = res[key]['lastaction']['id']\n else:\n last_action_id = None\n\n if res[key]['lastupdated']:\n last_updated_id = res[key]['lastupdated']['id']\n else:\n last_updated_id = None\n\n out = u\"%s %s,%s,%s,%s,%s,%s,%s,%s\" % \\\n (key, res[key]['total'], res[key]['translated'],\n res[key]['fuzzy'], res[key]['suggestions'],\n res[key]['critical'], res[key]['is_dirty'],\n last_action_id, last_updated_id)\n\n self.stdout.write(out)\n\n def dump_all(self, stop_level):\n root = Directory.objects.root\n self._dump_item(root, 0, stop_level=stop_level)\n\n def _dump_item(self, item, level, stop_level):\n self.stdout.write(self.dumped(item))\n if item.is_dir:\n # item is a Directory\n if item.is_project():\n self.stdout.write(self.dumped(item.project))\n elif item.is_language():\n self.stdout.write(self.dumped(item.language))\n elif item.is_translationproject():\n try:\n self.stdout.write(self.dumped(item.translationproject))\n except:\n pass\n else:\n # item should be a Store\n for unit in item.units:\n self.stdout.write(self.dumped(unit))\n for sg in unit.get_suggestions():\n self.stdout.write(self.dumped(sg))\n\n if stop_level != level:\n item.initialize_children()\n if item.children:\n for child in item.children:\n self._dump_item(child, level + 1, stop_level=stop_level)\n\n def dumped(self, item):\n def get_param(param):\n p = getattr(item, param)\n res = p() if callable(p) else p\n res = u\"%s\" % res\n res = res.replace('\\n', '\\\\n')\n return (param, res)\n\n return u\"%d:%s\\t%s\" % \\\n (\n item.id,\n item._meta.object_name,\n \"\\t\".join(\n u\"%s=%s\" % (k, v)\n for k, v in map(get_param, DUMPED[item._meta.object_name])\n )\n )\n", "path": "pootle/apps/pootle_app/management/commands/dump.py"}]} | 1,918 | 742 |
gh_patches_debug_12538 | rasdani/github-patches | git_diff | kivy__python-for-android-3027 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix sqlalchemy recipe
Fix sqlalchemy build errors and bump sqlalchemy from 1.3.3 to 2.0+.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/sqlalchemy/__init__.py`
Content:
```
1 from pythonforandroid.recipe import CompiledComponentsPythonRecipe
2
3
4 class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):
5 name = 'sqlalchemy'
6 version = '1.3.3'
7 url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'
8 call_hostpython_via_targetpython = False
9
10 depends = ['setuptools']
11
12 patches = ['zipsafe.patch']
13
14
15 recipe = SQLAlchemyRecipe()
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pythonforandroid/recipes/sqlalchemy/__init__.py b/pythonforandroid/recipes/sqlalchemy/__init__.py
--- a/pythonforandroid/recipes/sqlalchemy/__init__.py
+++ b/pythonforandroid/recipes/sqlalchemy/__init__.py
@@ -1,15 +1,15 @@
-from pythonforandroid.recipe import CompiledComponentsPythonRecipe
+from pythonforandroid.recipe import PyProjectRecipe
-class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):
+class SQLAlchemyRecipe(PyProjectRecipe):
name = 'sqlalchemy'
- version = '1.3.3'
- url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'
- call_hostpython_via_targetpython = False
-
+ version = '2.0.30'
+ url = 'https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz'
depends = ['setuptools']
- patches = ['zipsafe.patch']
+ @property
+ def versioned_url(self):
+ return self.url.format(self.version.replace(".", "_"))
recipe = SQLAlchemyRecipe()
| {"golden_diff": "diff --git a/pythonforandroid/recipes/sqlalchemy/__init__.py b/pythonforandroid/recipes/sqlalchemy/__init__.py\n--- a/pythonforandroid/recipes/sqlalchemy/__init__.py\n+++ b/pythonforandroid/recipes/sqlalchemy/__init__.py\n@@ -1,15 +1,15 @@\n-from pythonforandroid.recipe import CompiledComponentsPythonRecipe\n+from pythonforandroid.recipe import PyProjectRecipe\n \n \n-class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):\n+class SQLAlchemyRecipe(PyProjectRecipe):\n name = 'sqlalchemy'\n- version = '1.3.3'\n- url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'\n- call_hostpython_via_targetpython = False\n-\n+ version = '2.0.30'\n+ url = 'https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz'\n depends = ['setuptools']\n \n- patches = ['zipsafe.patch']\n+ @property\n+ def versioned_url(self):\n+ return self.url.format(self.version.replace(\".\", \"_\"))\n \n \n recipe = SQLAlchemyRecipe()\n", "issue": "Fix sqlalchemy recipe\nFix sqlalchemy build errors and bump sqlalchemy from 1.3.3 to 2.0+.\r\n\n", "before_files": [{"content": "from pythonforandroid.recipe import CompiledComponentsPythonRecipe\n\n\nclass SQLAlchemyRecipe(CompiledComponentsPythonRecipe):\n name = 'sqlalchemy'\n version = '1.3.3'\n url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'\n call_hostpython_via_targetpython = False\n\n depends = ['setuptools']\n\n patches = ['zipsafe.patch']\n\n\nrecipe = SQLAlchemyRecipe()\n", "path": "pythonforandroid/recipes/sqlalchemy/__init__.py"}], "after_files": [{"content": "from pythonforandroid.recipe import PyProjectRecipe\n\n\nclass SQLAlchemyRecipe(PyProjectRecipe):\n name = 'sqlalchemy'\n version = '2.0.30'\n url = 'https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz'\n depends = ['setuptools']\n\n @property\n def versioned_url(self):\n return self.url.format(self.version.replace(\".\", \"_\"))\n\n\nrecipe = SQLAlchemyRecipe()\n", "path": "pythonforandroid/recipes/sqlalchemy/__init__.py"}]} | 413 | 249 |
gh_patches_debug_5816 | rasdani/github-patches | git_diff | pulp__pulpcore-4684 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
file:// sync deletes files from directory
**Version**
Pulpcore 3.39
**Describe the bug**
When syncing file:// repositories, files are disappearing after the sync.
**To Reproduce**
1) Copy these two repositories to the FS:
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2
2) Sync one, then the other
3) See that some files disappeared.
- In my case, file2 lost every file except PULP_MANIFEST
**Expected behavior**
No files disappear.
**Additional context**
This also occurred with RPM content type files.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/download/file.py`
Content:
```
1 import os
2
3 from urllib.parse import urlparse
4
5 import aiofiles
6
7 from .base import BaseDownloader, DownloadResult
8
9
10 class FileDownloader(BaseDownloader):
11 """
12 A downloader for downloading files from the filesystem.
13
14 It provides digest and size validation along with computation of the digests needed to save the
15 file as an Artifact. It writes a new file to the disk and the return path is included in the
16 :class:`~pulpcore.plugin.download.DownloadResult`.
17
18 This downloader has all of the attributes of
19 :class:`~pulpcore.plugin.download.BaseDownloader`
20 """
21
22 def __init__(self, url, *args, **kwargs):
23 """
24 Download files from a url that starts with `file://`
25
26 Args:
27 url (str): The url to the file. This is expected to begin with `file://`
28 kwargs (dict): This accepts the parameters of
29 :class:`~pulpcore.plugin.download.BaseDownloader`.
30
31 Raises:
32 ValidationError: When the url starts with `file://`, but is not a subfolder of a path in
33 the ALLOWED_IMPORT_PATH setting.
34 """
35 from pulpcore.app.serializers import RemoteSerializer
36
37 RemoteSerializer().validate_url(url)
38 p = urlparse(url)
39 self._path = os.path.abspath(os.path.join(p.netloc, p.path))
40 super().__init__(url, *args, **kwargs)
41
42 async def _run(self, extra_data=None):
43 """
44 Read, validate, and compute digests on the `url`. This is a coroutine.
45
46 This method provides the same return object type and documented in
47 :meth:`~pulpcore.plugin.download.BaseDownloader._run`.
48
49 Args:
50 extra_data (dict): Extra data passed to the downloader.
51 """
52 async with aiofiles.open(self._path, "rb") as f_handle:
53 while True:
54 chunk = await f_handle.read(1048576) # 1 megabyte
55 if not chunk:
56 await self.finalize()
57 break # the reading is done
58 await self.handle_data(chunk)
59 return DownloadResult(
60 path=self._path,
61 artifact_attributes=self.artifact_attributes,
62 url=self.url,
63 headers=None,
64 )
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/download/file.py b/pulpcore/download/file.py
--- a/pulpcore/download/file.py
+++ b/pulpcore/download/file.py
@@ -57,7 +57,7 @@
break # the reading is done
await self.handle_data(chunk)
return DownloadResult(
- path=self._path,
+ path=self.path,
artifact_attributes=self.artifact_attributes,
url=self.url,
headers=None,
| {"golden_diff": "diff --git a/pulpcore/download/file.py b/pulpcore/download/file.py\n--- a/pulpcore/download/file.py\n+++ b/pulpcore/download/file.py\n@@ -57,7 +57,7 @@\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n- path=self._path,\n+ path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n", "issue": "file:// sync deletes files from directory\n**Version**\r\nPulpcore 3.39\r\n\r\n**Describe the bug**\r\nWhen syncing file:// repositories, files are disappearing after the sync.\r\n\r\n**To Reproduce**\r\n1) Copy these two repositories to the FS:\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2\r\n2) Sync one, then the other\r\n3) See that some files disappeared.\r\n - In my case, file2 lost every file except PULP_MANIFEST\r\n\r\n\r\n**Expected behavior**\r\nNo files disappear.\r\n\r\n**Additional context**\r\nThis also occurred with RPM content type files.\r\n\n", "before_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nimport aiofiles\n\nfrom .base import BaseDownloader, DownloadResult\n\n\nclass FileDownloader(BaseDownloader):\n \"\"\"\n A downloader for downloading files from the filesystem.\n\n It provides digest and size validation along with computation of the digests needed to save the\n file as an Artifact. It writes a new file to the disk and the return path is included in the\n :class:`~pulpcore.plugin.download.DownloadResult`.\n\n This downloader has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n Download files from a url that starts with `file://`\n\n Args:\n url (str): The url to the file. This is expected to begin with `file://`\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n\n Raises:\n ValidationError: When the url starts with `file://`, but is not a subfolder of a path in\n the ALLOWED_IMPORT_PATH setting.\n \"\"\"\n from pulpcore.app.serializers import RemoteSerializer\n\n RemoteSerializer().validate_url(url)\n p = urlparse(url)\n self._path = os.path.abspath(os.path.join(p.netloc, p.path))\n super().__init__(url, *args, **kwargs)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Read, validate, and compute digests on the `url`. This is a coroutine.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n \"\"\"\n async with aiofiles.open(self._path, \"rb\") as f_handle:\n while True:\n chunk = await f_handle.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self._path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n )\n", "path": "pulpcore/download/file.py"}], "after_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nimport aiofiles\n\nfrom .base import BaseDownloader, DownloadResult\n\n\nclass FileDownloader(BaseDownloader):\n \"\"\"\n A downloader for downloading files from the filesystem.\n\n It provides digest and size validation along with computation of the digests needed to save the\n file as an Artifact. 
It writes a new file to the disk and the return path is included in the\n :class:`~pulpcore.plugin.download.DownloadResult`.\n\n This downloader has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n Download files from a url that starts with `file://`\n\n Args:\n url (str): The url to the file. This is expected to begin with `file://`\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n\n Raises:\n ValidationError: When the url starts with `file://`, but is not a subfolder of a path in\n the ALLOWED_IMPORT_PATH setting.\n \"\"\"\n from pulpcore.app.serializers import RemoteSerializer\n\n RemoteSerializer().validate_url(url)\n p = urlparse(url)\n self._path = os.path.abspath(os.path.join(p.netloc, p.path))\n super().__init__(url, *args, **kwargs)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Read, validate, and compute digests on the `url`. This is a coroutine.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n \"\"\"\n async with aiofiles.open(self._path, \"rb\") as f_handle:\n while True:\n chunk = await f_handle.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n )\n", "path": "pulpcore/download/file.py"}]} | 1,016 | 99 |
gh_patches_debug_17731 | rasdani/github-patches | git_diff | encode__httpx-1075 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aiter_lines() doesn't return full lines that span multiple chunks
<https://gist.github.com/scr-oath/aa76d200222a0409d09a0d6feb1a13e2> shows an example setup using cherry.py as the server, which just outputs two lines - the JSON is big enough to be sent in two chunks; httpx's aiter_lines() gets confused and yields data from the middle of the JSON line, skipping the starting part, which was most likely sent in a chunk without a newline.
### test-httpx.py
```python
import asyncio
import json
import httpx
class TestHttpx:
def __init__(self):
pass
async def __call__(self):
http_client = httpx.AsyncClient()
async with http_client.stream(method="GET", url='http://localhost:8080/lines') as response:
is_message = True
async for line in response.aiter_lines():
is_message = not is_message
if is_message:
message = json.loads(line)
print(message)
def main():
test_httpx = TestHttpx()
asyncio.run(test_httpx())
if __name__ == '__main__':
main()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_decoders.py`
Content:
```
1 """
2 Handlers for Content-Encoding.
3
4 See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
5 """
6 import codecs
7 import typing
8 import zlib
9
10 import chardet
11
12 from ._exceptions import DecodingError
13
14 try:
15 import brotli
16 except ImportError: # pragma: nocover
17 brotli = None
18
19
20 class Decoder:
21 def decode(self, data: bytes) -> bytes:
22 raise NotImplementedError() # pragma: nocover
23
24 def flush(self) -> bytes:
25 raise NotImplementedError() # pragma: nocover
26
27
28 class IdentityDecoder(Decoder):
29 """
30 Handle unencoded data.
31 """
32
33 def decode(self, data: bytes) -> bytes:
34 return data
35
36 def flush(self) -> bytes:
37 return b""
38
39
40 class DeflateDecoder(Decoder):
41 """
42 Handle 'deflate' decoding.
43
44 See: https://stackoverflow.com/questions/1838699
45 """
46
47 def __init__(self) -> None:
48 self.first_attempt = True
49 self.decompressor = zlib.decompressobj()
50
51 def decode(self, data: bytes) -> bytes:
52 was_first_attempt = self.first_attempt
53 self.first_attempt = False
54 try:
55 return self.decompressor.decompress(data)
56 except zlib.error as exc:
57 if was_first_attempt:
58 self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
59 return self.decode(data)
60 raise DecodingError from exc
61
62 def flush(self) -> bytes:
63 try:
64 return self.decompressor.flush()
65 except zlib.error as exc: # pragma: nocover
66 raise DecodingError from exc
67
68
69 class GZipDecoder(Decoder):
70 """
71 Handle 'gzip' decoding.
72
73 See: https://stackoverflow.com/questions/1838699
74 """
75
76 def __init__(self) -> None:
77 self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)
78
79 def decode(self, data: bytes) -> bytes:
80 try:
81 return self.decompressor.decompress(data)
82 except zlib.error as exc:
83 raise DecodingError from exc
84
85 def flush(self) -> bytes:
86 try:
87 return self.decompressor.flush()
88 except zlib.error as exc: # pragma: nocover
89 raise DecodingError from exc
90
91
92 class BrotliDecoder(Decoder):
93 """
94 Handle 'brotli' decoding.
95
96 Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/
97 or `pip install brotli`. See https://github.com/google/brotli
98 Supports both 'brotlipy' and 'Brotli' packages since they share an import
99 name. The top branches are for 'brotlipy' and bottom branches for 'Brotli'
100 """
101
102 def __init__(self) -> None:
103 assert (
104 brotli is not None
105 ), "The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'"
106 self.decompressor = brotli.Decompressor()
107 self.seen_data = False
108 if hasattr(self.decompressor, "decompress"):
109 self._decompress = self.decompressor.decompress
110 else:
111 self._decompress = self.decompressor.process # pragma: nocover
112
113 def decode(self, data: bytes) -> bytes:
114 if not data:
115 return b""
116 self.seen_data = True
117 try:
118 return self._decompress(data)
119 except brotli.error as exc:
120 raise DecodingError from exc
121
122 def flush(self) -> bytes:
123 if not self.seen_data:
124 return b""
125 try:
126 if hasattr(self.decompressor, "finish"):
127 self.decompressor.finish()
128 return b""
129 except brotli.error as exc: # pragma: nocover
130 raise DecodingError from exc
131
132
133 class MultiDecoder(Decoder):
134 """
135 Handle the case where multiple encodings have been applied.
136 """
137
138 def __init__(self, children: typing.Sequence[Decoder]) -> None:
139 """
140 'children' should be a sequence of decoders in the order in which
141 each was applied.
142 """
143 # Note that we reverse the order for decoding.
144 self.children = list(reversed(children))
145
146 def decode(self, data: bytes) -> bytes:
147 for child in self.children:
148 data = child.decode(data)
149 return data
150
151 def flush(self) -> bytes:
152 data = b""
153 for child in self.children:
154 data = child.decode(data) + child.flush()
155 return data
156
157
158 class TextDecoder:
159 """
160 Handles incrementally decoding bytes into text
161 """
162
163 def __init__(self, encoding: typing.Optional[str] = None):
164 self.decoder: typing.Optional[codecs.IncrementalDecoder] = (
165 None if encoding is None else codecs.getincrementaldecoder(encoding)()
166 )
167 self.detector = chardet.universaldetector.UniversalDetector()
168
169 # This buffer is only needed if 'decoder' is 'None'
170 # we want to trigger errors if data is getting added to
171 # our internal buffer for some silly reason while
172 # a decoder is discovered.
173 self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray()
174
175 def decode(self, data: bytes) -> str:
176 try:
177 if self.decoder is not None:
178 text = self.decoder.decode(data)
179 else:
180 assert self.buffer is not None
181 text = ""
182 self.detector.feed(data)
183 self.buffer += data
184
185 # Should be more than enough data to process, we don't
186 # want to buffer too long as chardet will wait until
187 # detector.close() is used to give back common
188 # encodings like 'utf-8'.
189 if len(self.buffer) >= 4096:
190 self.decoder = codecs.getincrementaldecoder(
191 self._detector_result()
192 )()
193 text = self.decoder.decode(bytes(self.buffer), False)
194 self.buffer = None
195
196 return text
197 except UnicodeDecodeError: # pragma: nocover
198 raise DecodingError() from None
199
200 def flush(self) -> str:
201 try:
202 if self.decoder is None:
203 # Empty string case as chardet is guaranteed to not have a guess.
204 assert self.buffer is not None
205 if len(self.buffer) == 0:
206 return ""
207 return bytes(self.buffer).decode(self._detector_result())
208
209 return self.decoder.decode(b"", True)
210 except UnicodeDecodeError: # pragma: nocover
211 raise DecodingError() from None
212
213 def _detector_result(self) -> str:
214 self.detector.close()
215 result = self.detector.result["encoding"]
216 if not result: # pragma: nocover
217 raise DecodingError("Unable to determine encoding of content")
218
219 return result
220
221
222 class LineDecoder:
223 """
224 Handles incrementally reading lines from text.
225
226 Uses universal line decoding, supporting any of `\n`, `\r`, or `\r\n`
227 as line endings, normalizing to `\n`.
228 """
229
230 def __init__(self) -> None:
231 self.buffer = ""
232
233 def decode(self, text: str) -> typing.List[str]:
234 lines = []
235
236 if text.startswith("\n") and self.buffer and self.buffer[-1] == "\r":
237 # Handle the case where we have an "\r\n" split across
238 # our previous input, and our new chunk.
239 lines.append(self.buffer[:-1] + "\n")
240 self.buffer = ""
241 text = text[1:]
242
243 while text:
244 num_chars = len(text)
245 for idx in range(num_chars):
246 char = text[idx]
247 next_char = None if idx + 1 == num_chars else text[idx + 1]
248 if char == "\n":
249 lines.append(self.buffer + text[: idx + 1])
250 self.buffer = ""
251 text = text[idx + 1 :]
252 break
253 elif char == "\r" and next_char == "\n":
254 lines.append(self.buffer + text[:idx] + "\n")
255 self.buffer = ""
256 text = text[idx + 2 :]
257 break
258 elif char == "\r" and next_char is not None:
259 lines.append(self.buffer + text[:idx] + "\n")
260 self.buffer = ""
261 text = text[idx + 1 :]
262 break
263 elif next_char is None:
264 self.buffer += text
265 text = ""
266 break
267
268 return lines
269
270 def flush(self) -> typing.List[str]:
271 if self.buffer.endswith("\r"):
272 # Handle the case where we had a trailing '\r', which could have
273 # been a '\r\n' pair.
274 lines = [self.buffer[:-1] + "\n"]
275 elif self.buffer:
276 lines = [self.buffer]
277 else:
278 lines = []
279 self.buffer = ""
280 return lines
281
282
283 SUPPORTED_DECODERS = {
284 "identity": IdentityDecoder,
285 "gzip": GZipDecoder,
286 "deflate": DeflateDecoder,
287 "br": BrotliDecoder,
288 }
289
290
291 if brotli is None:
292 SUPPORTED_DECODERS.pop("br") # pragma: nocover
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/_decoders.py b/httpx/_decoders.py
--- a/httpx/_decoders.py
+++ b/httpx/_decoders.py
@@ -233,12 +233,18 @@
def decode(self, text: str) -> typing.List[str]:
lines = []
- if text.startswith("\n") and self.buffer and self.buffer[-1] == "\r":
- # Handle the case where we have an "\r\n" split across
- # our previous input, and our new chunk.
- lines.append(self.buffer[:-1] + "\n")
- self.buffer = ""
- text = text[1:]
+ if text and self.buffer and self.buffer[-1] == "\r":
+ if text.startswith("\n"):
+ # Handle the case where we have an "\r\n" split across
+ # our previous input, and our new chunk.
+ lines.append(self.buffer[:-1] + "\n")
+ self.buffer = ""
+ text = text[1:]
+ else:
+ # Handle the case where we have "\r" at the end of our
+ # previous input.
+ lines.append(self.buffer[:-1] + "\n")
+ self.buffer = ""
while text:
num_chars = len(text)
| {"golden_diff": "diff --git a/httpx/_decoders.py b/httpx/_decoders.py\n--- a/httpx/_decoders.py\n+++ b/httpx/_decoders.py\n@@ -233,12 +233,18 @@\n def decode(self, text: str) -> typing.List[str]:\n lines = []\n \n- if text.startswith(\"\\n\") and self.buffer and self.buffer[-1] == \"\\r\":\n- # Handle the case where we have an \"\\r\\n\" split across\n- # our previous input, and our new chunk.\n- lines.append(self.buffer[:-1] + \"\\n\")\n- self.buffer = \"\"\n- text = text[1:]\n+ if text and self.buffer and self.buffer[-1] == \"\\r\":\n+ if text.startswith(\"\\n\"):\n+ # Handle the case where we have an \"\\r\\n\" split across\n+ # our previous input, and our new chunk.\n+ lines.append(self.buffer[:-1] + \"\\n\")\n+ self.buffer = \"\"\n+ text = text[1:]\n+ else:\n+ # Handle the case where we have \"\\r\" at the end of our\n+ # previous input.\n+ lines.append(self.buffer[:-1] + \"\\n\")\n+ self.buffer = \"\"\n \n while text:\n num_chars = len(text)\n", "issue": "aiter_lines() doesn't return full lines that span multiple chunks\n<https://gist.github.com/scr-oath/aa76d200222a0409d09a0d6feb1a13e2> shows an example setup using cherry.py as server that just outputs two lines - the json is big enough to be sent in two chunks; httpx aiter_lines() gets confused and sends data from the middle of the json line - seems to skip the starting part - which was most likely sent in a chunk without a newline\r\n\r\n### test-httpx.py\r\n```python\r\nimport asyncio\r\nimport json\r\n\r\nimport httpx\r\n\r\n\r\nclass TestHttpx:\r\n def __init__(self):\r\n pass\r\n\r\n async def __call__(self):\r\n http_client = httpx.AsyncClient()\r\n async with http_client.stream(method=\"GET\", url='http://localhost:8080/lines') as response:\r\n is_message = True\r\n async for line in response.aiter_lines():\r\n is_message = not is_message\r\n if is_message:\r\n message = json.loads(line)\r\n print(message)\r\n\r\n\r\ndef main():\r\n test_httpx = TestHttpx()\r\n asyncio.run(test_httpx())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\naiter_lines() doesn't return full lines that span multiple chunks\n<https://gist.github.com/scr-oath/aa76d200222a0409d09a0d6feb1a13e2> shows an example setup using cherry.py as server that just outputs two lines - the json is big enough to be sent in two chunks; httpx aiter_lines() gets confused and sends data from the middle of the json line - seems to skip the starting part - which was most likely sent in a chunk without a newline\r\n\r\n### test-httpx.py\r\n```python\r\nimport asyncio\r\nimport json\r\n\r\nimport httpx\r\n\r\n\r\nclass TestHttpx:\r\n def __init__(self):\r\n pass\r\n\r\n async def __call__(self):\r\n http_client = httpx.AsyncClient()\r\n async with http_client.stream(method=\"GET\", url='http://localhost:8080/lines') as response:\r\n is_message = True\r\n async for line in response.aiter_lines():\r\n is_message = not is_message\r\n if is_message:\r\n message = json.loads(line)\r\n print(message)\r\n\r\n\r\ndef main():\r\n test_httpx = TestHttpx()\r\n asyncio.run(test_httpx())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\n", "before_files": [{"content": "\"\"\"\nHandlers for Content-Encoding.\n\nSee: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding\n\"\"\"\nimport codecs\nimport typing\nimport zlib\n\nimport chardet\n\nfrom ._exceptions import DecodingError\n\ntry:\n import brotli\nexcept ImportError: # pragma: nocover\n brotli = None\n\n\nclass Decoder:\n def decode(self, data: bytes) -> bytes:\n raise NotImplementedError() # 
pragma: nocover\n\n def flush(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass IdentityDecoder(Decoder):\n \"\"\"\n Handle unencoded data.\n \"\"\"\n\n def decode(self, data: bytes) -> bytes:\n return data\n\n def flush(self) -> bytes:\n return b\"\"\n\n\nclass DeflateDecoder(Decoder):\n \"\"\"\n Handle 'deflate' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.first_attempt = True\n self.decompressor = zlib.decompressobj()\n\n def decode(self, data: bytes) -> bytes:\n was_first_attempt = self.first_attempt\n self.first_attempt = False\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n if was_first_attempt:\n self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n return self.decode(data)\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass GZipDecoder(Decoder):\n \"\"\"\n Handle 'gzip' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)\n\n def decode(self, data: bytes) -> bytes:\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass BrotliDecoder(Decoder):\n \"\"\"\n Handle 'brotli' decoding.\n\n Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/\n or `pip install brotli`. See https://github.com/google/brotli\n Supports both 'brotlipy' and 'Brotli' packages since they share an import\n name. 
The top branches are for 'brotlipy' and bottom branches for 'Brotli'\n \"\"\"\n\n def __init__(self) -> None:\n assert (\n brotli is not None\n ), \"The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'\"\n self.decompressor = brotli.Decompressor()\n self.seen_data = False\n if hasattr(self.decompressor, \"decompress\"):\n self._decompress = self.decompressor.decompress\n else:\n self._decompress = self.decompressor.process # pragma: nocover\n\n def decode(self, data: bytes) -> bytes:\n if not data:\n return b\"\"\n self.seen_data = True\n try:\n return self._decompress(data)\n except brotli.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n if not self.seen_data:\n return b\"\"\n try:\n if hasattr(self.decompressor, \"finish\"):\n self.decompressor.finish()\n return b\"\"\n except brotli.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass MultiDecoder(Decoder):\n \"\"\"\n Handle the case where multiple encodings have been applied.\n \"\"\"\n\n def __init__(self, children: typing.Sequence[Decoder]) -> None:\n \"\"\"\n 'children' should be a sequence of decoders in the order in which\n each was applied.\n \"\"\"\n # Note that we reverse the order for decoding.\n self.children = list(reversed(children))\n\n def decode(self, data: bytes) -> bytes:\n for child in self.children:\n data = child.decode(data)\n return data\n\n def flush(self) -> bytes:\n data = b\"\"\n for child in self.children:\n data = child.decode(data) + child.flush()\n return data\n\n\nclass TextDecoder:\n \"\"\"\n Handles incrementally decoding bytes into text\n \"\"\"\n\n def __init__(self, encoding: typing.Optional[str] = None):\n self.decoder: typing.Optional[codecs.IncrementalDecoder] = (\n None if encoding is None else codecs.getincrementaldecoder(encoding)()\n )\n self.detector = chardet.universaldetector.UniversalDetector()\n\n # This buffer is only needed if 'decoder' is 'None'\n # we want to trigger errors if data is getting added to\n # our internal buffer for some silly reason while\n # a decoder is discovered.\n self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray()\n\n def decode(self, data: bytes) -> str:\n try:\n if self.decoder is not None:\n text = self.decoder.decode(data)\n else:\n assert self.buffer is not None\n text = \"\"\n self.detector.feed(data)\n self.buffer += data\n\n # Should be more than enough data to process, we don't\n # want to buffer too long as chardet will wait until\n # detector.close() is used to give back common\n # encodings like 'utf-8'.\n if len(self.buffer) >= 4096:\n self.decoder = codecs.getincrementaldecoder(\n self._detector_result()\n )()\n text = self.decoder.decode(bytes(self.buffer), False)\n self.buffer = None\n\n return text\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def flush(self) -> str:\n try:\n if self.decoder is None:\n # Empty string case as chardet is guaranteed to not have a guess.\n assert self.buffer is not None\n if len(self.buffer) == 0:\n return \"\"\n return bytes(self.buffer).decode(self._detector_result())\n\n return self.decoder.decode(b\"\", True)\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def _detector_result(self) -> str:\n self.detector.close()\n result = self.detector.result[\"encoding\"]\n if not result: # pragma: nocover\n raise DecodingError(\"Unable to determine encoding of content\")\n\n return result\n\n\nclass LineDecoder:\n \"\"\"\n Handles incrementally reading 
lines from text.\n\n Uses universal line decoding, supporting any of `\\n`, `\\r`, or `\\r\\n`\n as line endings, normalizing to `\\n`.\n \"\"\"\n\n def __init__(self) -> None:\n self.buffer = \"\"\n\n def decode(self, text: str) -> typing.List[str]:\n lines = []\n\n if text.startswith(\"\\n\") and self.buffer and self.buffer[-1] == \"\\r\":\n # Handle the case where we have an \"\\r\\n\" split across\n # our previous input, and our new chunk.\n lines.append(self.buffer[:-1] + \"\\n\")\n self.buffer = \"\"\n text = text[1:]\n\n while text:\n num_chars = len(text)\n for idx in range(num_chars):\n char = text[idx]\n next_char = None if idx + 1 == num_chars else text[idx + 1]\n if char == \"\\n\":\n lines.append(self.buffer + text[: idx + 1])\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif char == \"\\r\" and next_char == \"\\n\":\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 2 :]\n break\n elif char == \"\\r\" and next_char is not None:\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif next_char is None:\n self.buffer += text\n text = \"\"\n break\n\n return lines\n\n def flush(self) -> typing.List[str]:\n if self.buffer.endswith(\"\\r\"):\n # Handle the case where we had a trailing '\\r', which could have\n # been a '\\r\\n' pair.\n lines = [self.buffer[:-1] + \"\\n\"]\n elif self.buffer:\n lines = [self.buffer]\n else:\n lines = []\n self.buffer = \"\"\n return lines\n\n\nSUPPORTED_DECODERS = {\n \"identity\": IdentityDecoder,\n \"gzip\": GZipDecoder,\n \"deflate\": DeflateDecoder,\n \"br\": BrotliDecoder,\n}\n\n\nif brotli is None:\n SUPPORTED_DECODERS.pop(\"br\") # pragma: nocover\n", "path": "httpx/_decoders.py"}], "after_files": [{"content": "\"\"\"\nHandlers for Content-Encoding.\n\nSee: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding\n\"\"\"\nimport codecs\nimport typing\nimport zlib\n\nimport chardet\n\nfrom ._exceptions import DecodingError\n\ntry:\n import brotli\nexcept ImportError: # pragma: nocover\n brotli = None\n\n\nclass Decoder:\n def decode(self, data: bytes) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def flush(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass IdentityDecoder(Decoder):\n \"\"\"\n Handle unencoded data.\n \"\"\"\n\n def decode(self, data: bytes) -> bytes:\n return data\n\n def flush(self) -> bytes:\n return b\"\"\n\n\nclass DeflateDecoder(Decoder):\n \"\"\"\n Handle 'deflate' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.first_attempt = True\n self.decompressor = zlib.decompressobj()\n\n def decode(self, data: bytes) -> bytes:\n was_first_attempt = self.first_attempt\n self.first_attempt = False\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n if was_first_attempt:\n self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n return self.decode(data)\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass GZipDecoder(Decoder):\n \"\"\"\n Handle 'gzip' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)\n\n def decode(self, data: bytes) -> bytes:\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n raise 
DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass BrotliDecoder(Decoder):\n \"\"\"\n Handle 'brotli' decoding.\n\n Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/\n or `pip install brotli`. See https://github.com/google/brotli\n Supports both 'brotlipy' and 'Brotli' packages since they share an import\n name. The top branches are for 'brotlipy' and bottom branches for 'Brotli'\n \"\"\"\n\n def __init__(self) -> None:\n assert (\n brotli is not None\n ), \"The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'\"\n self.decompressor = brotli.Decompressor()\n self.seen_data = False\n if hasattr(self.decompressor, \"decompress\"):\n self._decompress = self.decompressor.decompress\n else:\n self._decompress = self.decompressor.process # pragma: nocover\n\n def decode(self, data: bytes) -> bytes:\n if not data:\n return b\"\"\n self.seen_data = True\n try:\n return self._decompress(data)\n except brotli.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n if not self.seen_data:\n return b\"\"\n try:\n if hasattr(self.decompressor, \"finish\"):\n self.decompressor.finish()\n return b\"\"\n except brotli.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass MultiDecoder(Decoder):\n \"\"\"\n Handle the case where multiple encodings have been applied.\n \"\"\"\n\n def __init__(self, children: typing.Sequence[Decoder]) -> None:\n \"\"\"\n 'children' should be a sequence of decoders in the order in which\n each was applied.\n \"\"\"\n # Note that we reverse the order for decoding.\n self.children = list(reversed(children))\n\n def decode(self, data: bytes) -> bytes:\n for child in self.children:\n data = child.decode(data)\n return data\n\n def flush(self) -> bytes:\n data = b\"\"\n for child in self.children:\n data = child.decode(data) + child.flush()\n return data\n\n\nclass TextDecoder:\n \"\"\"\n Handles incrementally decoding bytes into text\n \"\"\"\n\n def __init__(self, encoding: typing.Optional[str] = None):\n self.decoder: typing.Optional[codecs.IncrementalDecoder] = (\n None if encoding is None else codecs.getincrementaldecoder(encoding)()\n )\n self.detector = chardet.universaldetector.UniversalDetector()\n\n # This buffer is only needed if 'decoder' is 'None'\n # we want to trigger errors if data is getting added to\n # our internal buffer for some silly reason while\n # a decoder is discovered.\n self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray()\n\n def decode(self, data: bytes) -> str:\n try:\n if self.decoder is not None:\n text = self.decoder.decode(data)\n else:\n assert self.buffer is not None\n text = \"\"\n self.detector.feed(data)\n self.buffer += data\n\n # Should be more than enough data to process, we don't\n # want to buffer too long as chardet will wait until\n # detector.close() is used to give back common\n # encodings like 'utf-8'.\n if len(self.buffer) >= 4096:\n self.decoder = codecs.getincrementaldecoder(\n self._detector_result()\n )()\n text = self.decoder.decode(bytes(self.buffer), False)\n self.buffer = None\n\n return text\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def flush(self) -> str:\n try:\n if self.decoder is None:\n # Empty string case as chardet is guaranteed to not have a guess.\n assert self.buffer is not None\n if len(self.buffer) == 0:\n return \"\"\n return 
bytes(self.buffer).decode(self._detector_result())\n\n return self.decoder.decode(b\"\", True)\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def _detector_result(self) -> str:\n self.detector.close()\n result = self.detector.result[\"encoding\"]\n if not result: # pragma: nocover\n raise DecodingError(\"Unable to determine encoding of content\")\n\n return result\n\n\nclass LineDecoder:\n \"\"\"\n Handles incrementally reading lines from text.\n\n Uses universal line decoding, supporting any of `\\n`, `\\r`, or `\\r\\n`\n as line endings, normalizing to `\\n`.\n \"\"\"\n\n def __init__(self) -> None:\n self.buffer = \"\"\n\n def decode(self, text: str) -> typing.List[str]:\n lines = []\n\n if text and self.buffer and self.buffer[-1] == \"\\r\":\n if text.startswith(\"\\n\"):\n # Handle the case where we have an \"\\r\\n\" split across\n # our previous input, and our new chunk.\n lines.append(self.buffer[:-1] + \"\\n\")\n self.buffer = \"\"\n text = text[1:]\n else:\n # Handle the case where we have \"\\r\" at the end of our\n # previous input.\n lines.append(self.buffer[:-1] + \"\\n\")\n self.buffer = \"\"\n\n while text:\n num_chars = len(text)\n for idx in range(num_chars):\n char = text[idx]\n next_char = None if idx + 1 == num_chars else text[idx + 1]\n if char == \"\\n\":\n lines.append(self.buffer + text[: idx + 1])\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif char == \"\\r\" and next_char == \"\\n\":\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 2 :]\n break\n elif char == \"\\r\" and next_char is not None:\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif next_char is None:\n self.buffer += text\n text = \"\"\n break\n\n return lines\n\n def flush(self) -> typing.List[str]:\n if self.buffer.endswith(\"\\r\"):\n # Handle the case where we had a trailing '\\r', which could have\n # been a '\\r\\n' pair.\n lines = [self.buffer[:-1] + \"\\n\"]\n elif self.buffer:\n lines = [self.buffer]\n else:\n lines = []\n self.buffer = \"\"\n return lines\n\n\nSUPPORTED_DECODERS = {\n \"identity\": IdentityDecoder,\n \"gzip\": GZipDecoder,\n \"deflate\": DeflateDecoder,\n \"br\": BrotliDecoder,\n}\n\n\nif brotli is None:\n SUPPORTED_DECODERS.pop(\"br\") # pragma: nocover\n", "path": "httpx/_decoders.py"}]} | 3,616 | 288 |
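The record above patches `LineDecoder` in `httpx/_decoders.py` so that a chunk ending in a bare `\r` still yields a complete line. The check below is a hypothetical illustration rather than part of the dataset: it assumes an httpx build that already contains the patched class, and `chunks_to_lines` is a helper name introduced only for this sketch (roughly what `aiter_lines()` does internally).

```python
from httpx._decoders import LineDecoder  # assumes a build containing the patch above


def chunks_to_lines(decoder: LineDecoder, chunks: list) -> list:
    """Feed chunks through the incremental decoder and collect the emitted lines."""
    lines = []
    for chunk in chunks:
        lines.extend(decoder.decode(chunk))  # decode chunk by chunk
    lines.extend(decoder.flush())            # emit whatever is still buffered at the end
    return lines


# "\r\n" split across two chunks: both JSON lines must come back intact.
assert chunks_to_lines(LineDecoder(), ['{"a": 1}\r', '\n{"b": 2}\n']) == [
    '{"a": 1}\n',
    '{"b": 2}\n',
]

# Bare "\r" at the end of a chunk: before the patch, the buffered first line was
# glued onto the front of the next chunk instead of being emitted on its own.
assert chunks_to_lines(LineDecoder(), ['{"a": 1}\r', '{"b": 2}\n']) == [
    '{"a": 1}\n',
    '{"b": 2}\n',
]
```

The second case is the boundary condition the patch adds handling for: a chunk that stops right after a carriage return.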
gh_patches_debug_3254 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-43 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django: CursorWrapper expects a sequence or None for params
As seen in this partial traceback:
```
File "python3.6/site-packages/scout_apm/django/instruments/sql.py", line 29, in execute
return self.cursor.execute(sql, params)
File "python3.6/site-packages/django/db/backends/utils.py", line 80, in execute
return super(CursorDebugWrapper, self).execute(sql, params)
File "python3.6/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
IndexError: tuple index out of range
```
Currently making a PR to hopefully fix =)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/django/instruments/sql.py`
Content:
```
1 from __future__ import absolute_import
2 import logging
3
4 from scout_apm.core.monkey import monkeypatch_method
5 from scout_apm.core.tracked_request import TrackedRequest
6
7 try:
8 from django.db.backends.base.base import BaseDatabaseWrapper
9 except ImportError:
10 # Backwards compatibility for Django <1.8
11 from django.db.backends import BaseDatabaseWrapper
12
13 try:
14 from django.db.backends.utils import CursorWrapper
15 except ImportError:
16 # Backwards compatibility for Django <1.9
17 from django.db.backends.util import CursorWrapper
18
19 logger = logging.getLogger(__name__)
20
21
22 class _DetailedTracingCursorWrapper(CursorWrapper):
23 def execute(self, sql, params=()):
24 tr = TrackedRequest.instance()
25 span = tr.start_span(operation='SQL/Query')
26 span.tag('db.statement', sql)
27
28 try:
29 return self.cursor.execute(sql, params)
30 finally:
31 tr.stop_span()
32
33 def executemany(self, sql, param_list):
34 span = TrackedRequest.instance().start_span(operation='SQL/Many')
35 span.tag('db.statement', sql)
36
37 try:
38 return self.cursor.executemany(sql, param_list)
39 finally:
40 TrackedRequest.instance().stop_span()
41
42
43 # pylint: disable=too-few-public-methods
44 class SQLInstrument:
45
46 # The linter thinks the methods we monkeypatch are not used
47 # pylint: disable=W0612
48 # pylint: disable=no-method-argument
49 @staticmethod
50 def install():
51 """
52 Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor`
53 method of BaseDatabaseWrapper, to return a wrapper that instruments any
54 calls going through it.
55 """
56 @monkeypatch_method(BaseDatabaseWrapper)
57 def cursor(original, self, *args, **kwargs):
58 result = original(*args, **kwargs)
59 return _DetailedTracingCursorWrapper(result, self)
60
61 logger.debug('Monkey patched SQL')
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/django/instruments/sql.py b/src/scout_apm/django/instruments/sql.py
--- a/src/scout_apm/django/instruments/sql.py
+++ b/src/scout_apm/django/instruments/sql.py
@@ -20,7 +20,7 @@
class _DetailedTracingCursorWrapper(CursorWrapper):
- def execute(self, sql, params=()):
+ def execute(self, sql, params=None):
tr = TrackedRequest.instance()
span = tr.start_span(operation='SQL/Query')
span.tag('db.statement', sql)
| {"golden_diff": "diff --git a/src/scout_apm/django/instruments/sql.py b/src/scout_apm/django/instruments/sql.py\n--- a/src/scout_apm/django/instruments/sql.py\n+++ b/src/scout_apm/django/instruments/sql.py\n@@ -20,7 +20,7 @@\n \n \n class _DetailedTracingCursorWrapper(CursorWrapper):\n- def execute(self, sql, params=()):\n+ def execute(self, sql, params=None):\n tr = TrackedRequest.instance()\n span = tr.start_span(operation='SQL/Query')\n span.tag('db.statement', sql)\n", "issue": "Django: CursorWrapper expects a sequence or None for params \nAs seen in this partial traceback:\r\n```\r\n File \"python3.6/site-packages/scout_apm/django/instruments/sql.py\", line 29, in execute\r\n return self.cursor.execute(sql, params)\r\n File \"python3.6/site-packages/django/db/backends/utils.py\", line 80, in execute\r\n return super(CursorDebugWrapper, self).execute(sql, params)\r\n File \"python3.6/site-packages/django/db/backends/utils.py\", line 65, in execute\r\n return self.cursor.execute(sql, params)\r\nIndexError: tuple index out of range\r\n```\r\nCurrently making a PR to hopefully fix =)\n", "before_files": [{"content": "from __future__ import absolute_import\nimport logging\n\nfrom scout_apm.core.monkey import monkeypatch_method\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from django.db.backends.base.base import BaseDatabaseWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.8\n from django.db.backends import BaseDatabaseWrapper\n\ntry:\n from django.db.backends.utils import CursorWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.9\n from django.db.backends.util import CursorWrapper\n\nlogger = logging.getLogger(__name__)\n\n\nclass _DetailedTracingCursorWrapper(CursorWrapper):\n def execute(self, sql, params=()):\n tr = TrackedRequest.instance()\n span = tr.start_span(operation='SQL/Query')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.execute(sql, params)\n finally:\n tr.stop_span()\n\n def executemany(self, sql, param_list):\n span = TrackedRequest.instance().start_span(operation='SQL/Many')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.executemany(sql, param_list)\n finally:\n TrackedRequest.instance().stop_span()\n\n\n# pylint: disable=too-few-public-methods\nclass SQLInstrument:\n\n # The linter thinks the methods we monkeypatch are not used\n # pylint: disable=W0612\n # pylint: disable=no-method-argument\n @staticmethod\n def install():\n \"\"\"\n Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor`\n method of BaseDatabaseWrapper, to return a wrapper that instruments any\n calls going through it.\n \"\"\"\n @monkeypatch_method(BaseDatabaseWrapper)\n def cursor(original, self, *args, **kwargs):\n result = original(*args, **kwargs)\n return _DetailedTracingCursorWrapper(result, self)\n\n logger.debug('Monkey patched SQL')\n", "path": "src/scout_apm/django/instruments/sql.py"}], "after_files": [{"content": "from __future__ import absolute_import\nimport logging\n\nfrom scout_apm.core.monkey import monkeypatch_method\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from django.db.backends.base.base import BaseDatabaseWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.8\n from django.db.backends import BaseDatabaseWrapper\n\ntry:\n from django.db.backends.utils import CursorWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.9\n from django.db.backends.util import CursorWrapper\n\nlogger = 
logging.getLogger(__name__)\n\n\nclass _DetailedTracingCursorWrapper(CursorWrapper):\n def execute(self, sql, params=None):\n tr = TrackedRequest.instance()\n span = tr.start_span(operation='SQL/Query')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.execute(sql, params)\n finally:\n tr.stop_span()\n\n def executemany(self, sql, param_list):\n span = TrackedRequest.instance().start_span(operation='SQL/Many')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.executemany(sql, param_list)\n finally:\n TrackedRequest.instance().stop_span()\n\n\n# pylint: disable=too-few-public-methods\nclass SQLInstrument:\n\n # The linter thinks the methods we monkeypatch are not used\n # pylint: disable=W0612\n # pylint: disable=no-method-argument\n @staticmethod\n def install():\n \"\"\"\n Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor`\n method of BaseDatabaseWrapper, to return a wrapper that instruments any\n calls going through it.\n \"\"\"\n @monkeypatch_method(BaseDatabaseWrapper)\n def cursor(original, self, *args, **kwargs):\n result = original(*args, **kwargs)\n return _DetailedTracingCursorWrapper(result, self)\n\n logger.debug('Monkey patched SQL')\n", "path": "src/scout_apm/django/instruments/sql.py"}]} | 956 | 132 |
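The one-character default change above (`params=()` to `params=None`) matters because DB-API drivers distinguish "no parameters" from "an empty parameter sequence": once any sequence is supplied, the call takes the driver's parameter-handling path, which is where the `IndexError` in the reported traceback surfaced. The class below is a reduced, hypothetical stand-in for the wrapper (tracing spans omitted) written only to make that distinction visible; the shipped fix simply forwards `params` and lets Django's cursor wrapper and the driver handle it.

```python
import sqlite3


class TracingCursor:
    """Reduced stand-in for _DetailedTracingCursorWrapper; span handling omitted."""

    def __init__(self, real_cursor):
        self.cursor = real_cursor

    def execute(self, sql, params=None):  # the patch changes the default from () to None
        if params is None:
            # No parameters at all: run the statement verbatim, so a literal
            # '%' in the SQL is never treated as a format placeholder.
            return self.cursor.execute(sql)
        # A real sequence (possibly empty) opts in to placeholder substitution.
        return self.cursor.execute(sql, params)


cur = TracingCursor(sqlite3.connect(":memory:").cursor())
cur.execute("SELECT strftime('%Y', 'now')")   # literal '%' with no params stays safe
cur.execute("SELECT ?", (1,))                 # parametrised calls still work as before
```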
gh_patches_debug_4260 | rasdani/github-patches | git_diff | qtile__qtile-1701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RTD doesn't run script/ffibuild
```
[rtd-command-info] start-time: 2020-04-30T03:23:17.858528Z, end-time: 2020-04-30T03:23:31.649069Z, duration: 13, exit-code: 0
python /home/docs/checkouts/readthedocs.org/user_builds/qtile/envs/latest/bin/sphinx-build -T -E -b readthedocs -d _build/doctrees-readthedocs -D language=en . _build/html
Running Sphinx v1.8.5
loading translations [en]... done
Unmet dependencies for '.widget.pulse_volume.PulseVolume': No module named 'libqtile.widget._pulse_audio'
building pango
python: can't open file './libqtile/pango_ffi_build.py': [Errno 2] No such file or directory
building xcursors
python: can't open file './libqtile/backend/x11/xcursors_ffi_build.py': [Errno 2] No such file or directory
building pulseaudio volume control
python: can't open file './libqtile/widget/pulseaudio_ffi.py': [Errno 2] No such file or directory
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/sphinx_qtile.py`
Content:
```
1 # Copyright (c) 2015 dmpayton
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 import builtins
22 import functools
23 import importlib
24 import inspect
25 import os
26 import pprint
27 from subprocess import call
28
29 from docutils import nodes
30 from docutils.parsers.rst import Directive
31 from docutils.statemachine import ViewList
32 from jinja2 import Template
33 from sphinx.util.nodes import nested_parse_with_titles
34
35 from libqtile import command_object, configurable, widget
36
37 qtile_module_template = Template('''
38 .. qtile_class:: {{ module }}.{{ class_name }}
39 {% if no_config %}:no-config:{% endif %}
40 {% if no_commands %}:no-commands:{% endif %}
41 ''')
42
43 qtile_class_template = Template('''
44 {{ class_name }}
45 {{ class_underline }}
46
47 .. autoclass:: {{ module }}.{{ class_name }}{% for arg in extra_arguments %}
48 {{ arg }}{% endfor %}
49 {% if is_widget %}
50 .. compound::
51
52 Supported bar orientations: {{ obj.orientations }}
53 {% endif %}
54 {% if configurable %}
55 .. list-table::
56 :widths: 20 20 60
57 :header-rows: 1
58
59 * - key
60 - default
61 - description
62 {% for key, default, description in defaults %}
63 * - ``{{ key }}``
64 - ``{{ default }}``
65 - {{ description }}
66 {% endfor %}
67 {% endif %}
68 {% if commandable %}
69 {% for cmd in commands %}
70 .. automethod:: {{ module }}.{{ class_name }}.{{ cmd }}
71 {% endfor %}
72 {% endif %}
73 ''')
74
75 qtile_hooks_template = Template('''
76 .. automethod:: libqtile.hook.subscribe.{{ method }}
77 ''')
78
79
80 # Adapted from sphinxcontrib-httpdomain
81 def import_object(module_name, expr):
82 mod = __import__(module_name)
83 mod = functools.reduce(getattr, module_name.split('.')[1:], mod)
84 globals = builtins
85 if not isinstance(globals, dict):
86 globals = globals.__dict__
87 return eval(expr, globals, mod.__dict__)
88
89
90 class SimpleDirectiveMixin:
91 has_content = True
92 required_arguments = 1
93
94 def make_rst(self):
95 raise NotImplementedError
96
97 def run(self):
98 node = nodes.section()
99 node.document = self.state.document
100 result = ViewList()
101 for line in self.make_rst():
102 result.append(line, '<{0}>'.format(self.__class__.__name__))
103 nested_parse_with_titles(self.state, result, node)
104 return node.children
105
106
107 def sphinx_escape(s):
108 return pprint.pformat(s, compact=False, width=10000)
109
110
111 class QtileClass(SimpleDirectiveMixin, Directive):
112 optional_arguments = 2
113
114 def make_rst(self):
115 module, class_name = self.arguments[0].rsplit('.', 1)
116 arguments = self.arguments[1:]
117 obj = import_object(module, class_name)
118 is_configurable = ':no-config:' not in arguments
119 is_commandable = ':no-commands:' not in arguments
120 arguments = [i for i in arguments if i not in (':no-config:', ':no-commands:')]
121
122 # build up a dict of defaults using reverse MRO
123 defaults = {}
124 for klass in reversed(obj.mro()):
125 if not issubclass(klass, configurable.Configurable):
126 continue
127 if not hasattr(klass, "defaults"):
128 continue
129 klass_defaults = getattr(klass, "defaults")
130 defaults.update({
131 d[0]: d[1:] for d in klass_defaults
132 })
133 # turn the dict into a list of ("value", "default", "description") tuples
134 defaults = [
135 (k, sphinx_escape(v[0]), sphinx_escape(v[1])) for k, v in sorted(defaults.items())
136 ]
137
138 context = {
139 'module': module,
140 'class_name': class_name,
141 'class_underline': "=" * len(class_name),
142 'obj': obj,
143 'defaults': defaults,
144 'configurable': is_configurable and issubclass(obj, configurable.Configurable),
145 'commandable': is_commandable and issubclass(obj, command_object.CommandObject),
146 'is_widget': issubclass(obj, widget.base._Widget),
147 'extra_arguments': arguments,
148 }
149 if context['commandable']:
150 context['commands'] = [
151 attr for attr in dir(obj) if attr.startswith('cmd_')
152 ]
153
154 rst = qtile_class_template.render(**context)
155 for line in rst.splitlines():
156 yield line
157
158
159 class QtileHooks(SimpleDirectiveMixin, Directive):
160 def make_rst(self):
161 module, class_name = self.arguments[0].rsplit('.', 1)
162 obj = import_object(module, class_name)
163 for method in sorted(obj.hooks):
164 rst = qtile_hooks_template.render(method=method)
165 for line in rst.splitlines():
166 yield line
167
168
169 class QtileModule(SimpleDirectiveMixin, Directive):
170 # :baseclass: <base class path>
171 # :no-commands:
172 # :no-config:
173 optional_arguments = 4
174
175 def make_rst(self):
176 module = importlib.import_module(self.arguments[0])
177
178 BaseClass = None
179 if ':baseclass:' in self.arguments:
180 BaseClass = import_object(*self.arguments[
181 self.arguments.index(':baseclass:') + 1].rsplit('.', 1))
182
183 for item in dir(module):
184 obj = import_object(self.arguments[0], item)
185 if not inspect.isclass(obj) and (BaseClass and
186 not isinstance(obj, BaseClass)):
187 continue
188
189 context = {
190 'module': self.arguments[0],
191 'class_name': item,
192 'no_config': ':no-config:' in self.arguments,
193 'no_commands': ':no-commands:' in self.arguments,
194 }
195 rst = qtile_module_template.render(**context)
196 for line in rst.splitlines():
197 if not line.strip():
198 continue
199 yield line
200
201
202 def generate_keybinding_images():
203 this_dir = os.path.dirname(__file__)
204 ffibuild_path = os.path.abspath(os.path.join(this_dir, "..", "scripts", "ffibuild"))
205 call([ffibuild_path])
206 call(['make', '-C', this_dir, 'genkeyimg'])
207
208
209 def setup(app):
210 generate_keybinding_images()
211 app.add_directive('qtile_class', QtileClass)
212 app.add_directive('qtile_hooks', QtileHooks)
213 app.add_directive('qtile_module', QtileModule)
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/sphinx_qtile.py b/docs/sphinx_qtile.py
--- a/docs/sphinx_qtile.py
+++ b/docs/sphinx_qtile.py
@@ -201,8 +201,8 @@
def generate_keybinding_images():
this_dir = os.path.dirname(__file__)
- ffibuild_path = os.path.abspath(os.path.join(this_dir, "..", "scripts", "ffibuild"))
- call([ffibuild_path])
+ base_dir = os.path.abspath(os.path.join(this_dir, ".."))
+ call(['make', '-C', base_dir, 'run-ffibuild'])
call(['make', '-C', this_dir, 'genkeyimg'])
| {"golden_diff": "diff --git a/docs/sphinx_qtile.py b/docs/sphinx_qtile.py\n--- a/docs/sphinx_qtile.py\n+++ b/docs/sphinx_qtile.py\n@@ -201,8 +201,8 @@\n \n def generate_keybinding_images():\n this_dir = os.path.dirname(__file__)\n- ffibuild_path = os.path.abspath(os.path.join(this_dir, \"..\", \"scripts\", \"ffibuild\"))\n- call([ffibuild_path])\n+ base_dir = os.path.abspath(os.path.join(this_dir, \"..\"))\n+ call(['make', '-C', base_dir, 'run-ffibuild'])\n call(['make', '-C', this_dir, 'genkeyimg'])\n", "issue": "RTD doesn't run script/ffibuild\n```\r\n[rtd-command-info] start-time: 2020-04-30T03:23:17.858528Z, end-time: 2020-04-30T03:23:31.649069Z, duration: 13, exit-code: 0\r\npython /home/docs/checkouts/readthedocs.org/user_builds/qtile/envs/latest/bin/sphinx-build -T -E -b readthedocs -d _build/doctrees-readthedocs -D language=en . _build/html\r\nRunning Sphinx v1.8.5\r\nloading translations [en]... done\r\nUnmet dependencies for '.widget.pulse_volume.PulseVolume': No module named 'libqtile.widget._pulse_audio'\r\nbuilding pango\r\npython: can't open file './libqtile/pango_ffi_build.py': [Errno 2] No such file or directory\r\nbuilding xcursors\r\npython: can't open file './libqtile/backend/x11/xcursors_ffi_build.py': [Errno 2] No such file or directory\r\nbuilding pulseaudio volume control\r\npython: can't open file './libqtile/widget/pulseaudio_ffi.py': [Errno 2] No such file or directory\r\n```\n", "before_files": [{"content": "# Copyright (c) 2015 dmpayton\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport builtins\nimport functools\nimport importlib\nimport inspect\nimport os\nimport pprint\nfrom subprocess import call\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom docutils.statemachine import ViewList\nfrom jinja2 import Template\nfrom sphinx.util.nodes import nested_parse_with_titles\n\nfrom libqtile import command_object, configurable, widget\n\nqtile_module_template = Template('''\n.. qtile_class:: {{ module }}.{{ class_name }}\n {% if no_config %}:no-config:{% endif %}\n {% if no_commands %}:no-commands:{% endif %}\n''')\n\nqtile_class_template = Template('''\n{{ class_name }}\n{{ class_underline }}\n\n.. autoclass:: {{ module }}.{{ class_name }}{% for arg in extra_arguments %}\n {{ arg }}{% endfor %}\n {% if is_widget %}\n .. compound::\n\n Supported bar orientations: {{ obj.orientations }}\n {% endif %}\n {% if configurable %}\n .. 
list-table::\n :widths: 20 20 60\n :header-rows: 1\n\n * - key\n - default\n - description\n {% for key, default, description in defaults %}\n * - ``{{ key }}``\n - ``{{ default }}``\n - {{ description }}\n {% endfor %}\n {% endif %}\n {% if commandable %}\n {% for cmd in commands %}\n .. automethod:: {{ module }}.{{ class_name }}.{{ cmd }}\n {% endfor %}\n {% endif %}\n''')\n\nqtile_hooks_template = Template('''\n.. automethod:: libqtile.hook.subscribe.{{ method }}\n''')\n\n\n# Adapted from sphinxcontrib-httpdomain\ndef import_object(module_name, expr):\n mod = __import__(module_name)\n mod = functools.reduce(getattr, module_name.split('.')[1:], mod)\n globals = builtins\n if not isinstance(globals, dict):\n globals = globals.__dict__\n return eval(expr, globals, mod.__dict__)\n\n\nclass SimpleDirectiveMixin:\n has_content = True\n required_arguments = 1\n\n def make_rst(self):\n raise NotImplementedError\n\n def run(self):\n node = nodes.section()\n node.document = self.state.document\n result = ViewList()\n for line in self.make_rst():\n result.append(line, '<{0}>'.format(self.__class__.__name__))\n nested_parse_with_titles(self.state, result, node)\n return node.children\n\n\ndef sphinx_escape(s):\n return pprint.pformat(s, compact=False, width=10000)\n\n\nclass QtileClass(SimpleDirectiveMixin, Directive):\n optional_arguments = 2\n\n def make_rst(self):\n module, class_name = self.arguments[0].rsplit('.', 1)\n arguments = self.arguments[1:]\n obj = import_object(module, class_name)\n is_configurable = ':no-config:' not in arguments\n is_commandable = ':no-commands:' not in arguments\n arguments = [i for i in arguments if i not in (':no-config:', ':no-commands:')]\n\n # build up a dict of defaults using reverse MRO\n defaults = {}\n for klass in reversed(obj.mro()):\n if not issubclass(klass, configurable.Configurable):\n continue\n if not hasattr(klass, \"defaults\"):\n continue\n klass_defaults = getattr(klass, \"defaults\")\n defaults.update({\n d[0]: d[1:] for d in klass_defaults\n })\n # turn the dict into a list of (\"value\", \"default\", \"description\") tuples\n defaults = [\n (k, sphinx_escape(v[0]), sphinx_escape(v[1])) for k, v in sorted(defaults.items())\n ]\n\n context = {\n 'module': module,\n 'class_name': class_name,\n 'class_underline': \"=\" * len(class_name),\n 'obj': obj,\n 'defaults': defaults,\n 'configurable': is_configurable and issubclass(obj, configurable.Configurable),\n 'commandable': is_commandable and issubclass(obj, command_object.CommandObject),\n 'is_widget': issubclass(obj, widget.base._Widget),\n 'extra_arguments': arguments,\n }\n if context['commandable']:\n context['commands'] = [\n attr for attr in dir(obj) if attr.startswith('cmd_')\n ]\n\n rst = qtile_class_template.render(**context)\n for line in rst.splitlines():\n yield line\n\n\nclass QtileHooks(SimpleDirectiveMixin, Directive):\n def make_rst(self):\n module, class_name = self.arguments[0].rsplit('.', 1)\n obj = import_object(module, class_name)\n for method in sorted(obj.hooks):\n rst = qtile_hooks_template.render(method=method)\n for line in rst.splitlines():\n yield line\n\n\nclass QtileModule(SimpleDirectiveMixin, Directive):\n # :baseclass: <base class path>\n # :no-commands:\n # :no-config:\n optional_arguments = 4\n\n def make_rst(self):\n module = importlib.import_module(self.arguments[0])\n\n BaseClass = None\n if ':baseclass:' in self.arguments:\n BaseClass = import_object(*self.arguments[\n self.arguments.index(':baseclass:') + 1].rsplit('.', 1))\n\n for item in dir(module):\n 
obj = import_object(self.arguments[0], item)\n if not inspect.isclass(obj) and (BaseClass and\n not isinstance(obj, BaseClass)):\n continue\n\n context = {\n 'module': self.arguments[0],\n 'class_name': item,\n 'no_config': ':no-config:' in self.arguments,\n 'no_commands': ':no-commands:' in self.arguments,\n }\n rst = qtile_module_template.render(**context)\n for line in rst.splitlines():\n if not line.strip():\n continue\n yield line\n\n\ndef generate_keybinding_images():\n this_dir = os.path.dirname(__file__)\n ffibuild_path = os.path.abspath(os.path.join(this_dir, \"..\", \"scripts\", \"ffibuild\"))\n call([ffibuild_path])\n call(['make', '-C', this_dir, 'genkeyimg'])\n\n\ndef setup(app):\n generate_keybinding_images()\n app.add_directive('qtile_class', QtileClass)\n app.add_directive('qtile_hooks', QtileHooks)\n app.add_directive('qtile_module', QtileModule)\n", "path": "docs/sphinx_qtile.py"}], "after_files": [{"content": "# Copyright (c) 2015 dmpayton\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport builtins\nimport functools\nimport importlib\nimport inspect\nimport os\nimport pprint\nfrom subprocess import call\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom docutils.statemachine import ViewList\nfrom jinja2 import Template\nfrom sphinx.util.nodes import nested_parse_with_titles\n\nfrom libqtile import command_object, configurable, widget\n\nqtile_module_template = Template('''\n.. qtile_class:: {{ module }}.{{ class_name }}\n {% if no_config %}:no-config:{% endif %}\n {% if no_commands %}:no-commands:{% endif %}\n''')\n\nqtile_class_template = Template('''\n{{ class_name }}\n{{ class_underline }}\n\n.. autoclass:: {{ module }}.{{ class_name }}{% for arg in extra_arguments %}\n {{ arg }}{% endfor %}\n {% if is_widget %}\n .. compound::\n\n Supported bar orientations: {{ obj.orientations }}\n {% endif %}\n {% if configurable %}\n .. list-table::\n :widths: 20 20 60\n :header-rows: 1\n\n * - key\n - default\n - description\n {% for key, default, description in defaults %}\n * - ``{{ key }}``\n - ``{{ default }}``\n - {{ description }}\n {% endfor %}\n {% endif %}\n {% if commandable %}\n {% for cmd in commands %}\n .. automethod:: {{ module }}.{{ class_name }}.{{ cmd }}\n {% endfor %}\n {% endif %}\n''')\n\nqtile_hooks_template = Template('''\n.. 
automethod:: libqtile.hook.subscribe.{{ method }}\n''')\n\n\n# Adapted from sphinxcontrib-httpdomain\ndef import_object(module_name, expr):\n mod = __import__(module_name)\n mod = functools.reduce(getattr, module_name.split('.')[1:], mod)\n globals = builtins\n if not isinstance(globals, dict):\n globals = globals.__dict__\n return eval(expr, globals, mod.__dict__)\n\n\nclass SimpleDirectiveMixin:\n has_content = True\n required_arguments = 1\n\n def make_rst(self):\n raise NotImplementedError\n\n def run(self):\n node = nodes.section()\n node.document = self.state.document\n result = ViewList()\n for line in self.make_rst():\n result.append(line, '<{0}>'.format(self.__class__.__name__))\n nested_parse_with_titles(self.state, result, node)\n return node.children\n\n\ndef sphinx_escape(s):\n return pprint.pformat(s, compact=False, width=10000)\n\n\nclass QtileClass(SimpleDirectiveMixin, Directive):\n optional_arguments = 2\n\n def make_rst(self):\n module, class_name = self.arguments[0].rsplit('.', 1)\n arguments = self.arguments[1:]\n obj = import_object(module, class_name)\n is_configurable = ':no-config:' not in arguments\n is_commandable = ':no-commands:' not in arguments\n arguments = [i for i in arguments if i not in (':no-config:', ':no-commands:')]\n\n # build up a dict of defaults using reverse MRO\n defaults = {}\n for klass in reversed(obj.mro()):\n if not issubclass(klass, configurable.Configurable):\n continue\n if not hasattr(klass, \"defaults\"):\n continue\n klass_defaults = getattr(klass, \"defaults\")\n defaults.update({\n d[0]: d[1:] for d in klass_defaults\n })\n # turn the dict into a list of (\"value\", \"default\", \"description\") tuples\n defaults = [\n (k, sphinx_escape(v[0]), sphinx_escape(v[1])) for k, v in sorted(defaults.items())\n ]\n\n context = {\n 'module': module,\n 'class_name': class_name,\n 'class_underline': \"=\" * len(class_name),\n 'obj': obj,\n 'defaults': defaults,\n 'configurable': is_configurable and issubclass(obj, configurable.Configurable),\n 'commandable': is_commandable and issubclass(obj, command_object.CommandObject),\n 'is_widget': issubclass(obj, widget.base._Widget),\n 'extra_arguments': arguments,\n }\n if context['commandable']:\n context['commands'] = [\n attr for attr in dir(obj) if attr.startswith('cmd_')\n ]\n\n rst = qtile_class_template.render(**context)\n for line in rst.splitlines():\n yield line\n\n\nclass QtileHooks(SimpleDirectiveMixin, Directive):\n def make_rst(self):\n module, class_name = self.arguments[0].rsplit('.', 1)\n obj = import_object(module, class_name)\n for method in sorted(obj.hooks):\n rst = qtile_hooks_template.render(method=method)\n for line in rst.splitlines():\n yield line\n\n\nclass QtileModule(SimpleDirectiveMixin, Directive):\n # :baseclass: <base class path>\n # :no-commands:\n # :no-config:\n optional_arguments = 4\n\n def make_rst(self):\n module = importlib.import_module(self.arguments[0])\n\n BaseClass = None\n if ':baseclass:' in self.arguments:\n BaseClass = import_object(*self.arguments[\n self.arguments.index(':baseclass:') + 1].rsplit('.', 1))\n\n for item in dir(module):\n obj = import_object(self.arguments[0], item)\n if not inspect.isclass(obj) and (BaseClass and\n not isinstance(obj, BaseClass)):\n continue\n\n context = {\n 'module': self.arguments[0],\n 'class_name': item,\n 'no_config': ':no-config:' in self.arguments,\n 'no_commands': ':no-commands:' in self.arguments,\n }\n rst = qtile_module_template.render(**context)\n for line in rst.splitlines():\n if not line.strip():\n 
continue\n yield line\n\n\ndef generate_keybinding_images():\n this_dir = os.path.dirname(__file__)\n base_dir = os.path.abspath(os.path.join(this_dir, \"..\"))\n call(['make', '-C', base_dir, 'run-ffibuild'])\n call(['make', '-C', this_dir, 'genkeyimg'])\n\n\ndef setup(app):\n generate_keybinding_images()\n app.add_directive('qtile_class', QtileClass)\n app.add_directive('qtile_hooks', QtileHooks)\n app.add_directive('qtile_module', QtileModule)\n", "path": "docs/sphinx_qtile.py"}]} | 2,733 | 153 |
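The log in the record above is a working-directory failure: `scripts/ffibuild` invokes the `*_ffi_build.py` modules through paths relative to the repository root (hence "can't open file './libqtile/pango_ffi_build.py'"), while the Read the Docs build runs Sphinx from `docs/` rather than the repository root, so those relative paths do not resolve. The patch stops calling the script directly and drives the build through `make -C` pointed at the repository root. Below is the patched helper from `docs/sphinx_qtile.py`, reproduced with explanatory comments added; an alternative approach (not what was merged) would have been to keep the direct call but pass `cwd=base_dir` to `subprocess.call`.

```python
import os
from subprocess import call


def generate_keybinding_images():
    this_dir = os.path.dirname(__file__)
    base_dir = os.path.abspath(os.path.join(this_dir, ".."))
    # -C switches make's working directory to the repository root before the
    # run-ffibuild recipe executes, so the relative paths used by the ffi
    # build scripts resolve no matter where sphinx-build was started from.
    call(['make', '-C', base_dir, 'run-ffibuild'])
    call(['make', '-C', this_dir, 'genkeyimg'])
```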
gh_patches_debug_28327 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9482 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concatenate/consolidate all algorithms with different implementations
### Feature description
There are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backtracking/minmax.py`
Content:
```
1 """
2 Minimax helps to achieve maximum score in a game by checking all possible moves.
3
4 """
5 from __future__ import annotations
6
7 import math
8
9
10 def minimax(
11 depth: int, node_index: int, is_max: bool, scores: list[int], height: float
12 ) -> int:
13 """
14 depth is current depth in game tree.
15 node_index is index of current node in scores[].
16 scores[] contains the leaves of game tree.
17 height is maximum height of game tree.
18
19 >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
20 >>> height = math.log(len(scores), 2)
21 >>> minimax(0, 0, True, scores, height)
22 65
23 >>> minimax(-1, 0, True, scores, height)
24 Traceback (most recent call last):
25 ...
26 ValueError: Depth cannot be less than 0
27 >>> minimax(0, 0, True, [], 2)
28 Traceback (most recent call last):
29 ...
30 ValueError: Scores cannot be empty
31 >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
32 >>> height = math.log(len(scores), 2)
33 >>> minimax(0, 0, True, scores, height)
34 12
35 """
36
37 if depth < 0:
38 raise ValueError("Depth cannot be less than 0")
39
40 if not scores:
41 raise ValueError("Scores cannot be empty")
42
43 if depth == height:
44 return scores[node_index]
45
46 return (
47 max(
48 minimax(depth + 1, node_index * 2, False, scores, height),
49 minimax(depth + 1, node_index * 2 + 1, False, scores, height),
50 )
51 if is_max
52 else min(
53 minimax(depth + 1, node_index * 2, True, scores, height),
54 minimax(depth + 1, node_index * 2 + 1, True, scores, height),
55 )
56 )
57
58
59 def main() -> None:
60 scores = [90, 23, 6, 33, 21, 65, 123, 34423]
61 height = math.log(len(scores), 2)
62 print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
63
64
65 if __name__ == "__main__":
66 import doctest
67
68 doctest.testmod()
69 main()
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backtracking/minmax.py b/backtracking/minmax.py
deleted file mode 100644
--- a/backtracking/minmax.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""
-Minimax helps to achieve maximum score in a game by checking all possible moves.
-
-"""
-from __future__ import annotations
-
-import math
-
-
-def minimax(
- depth: int, node_index: int, is_max: bool, scores: list[int], height: float
-) -> int:
- """
- depth is current depth in game tree.
- node_index is index of current node in scores[].
- scores[] contains the leaves of game tree.
- height is maximum height of game tree.
-
- >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
- >>> height = math.log(len(scores), 2)
- >>> minimax(0, 0, True, scores, height)
- 65
- >>> minimax(-1, 0, True, scores, height)
- Traceback (most recent call last):
- ...
- ValueError: Depth cannot be less than 0
- >>> minimax(0, 0, True, [], 2)
- Traceback (most recent call last):
- ...
- ValueError: Scores cannot be empty
- >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
- >>> height = math.log(len(scores), 2)
- >>> minimax(0, 0, True, scores, height)
- 12
- """
-
- if depth < 0:
- raise ValueError("Depth cannot be less than 0")
-
- if not scores:
- raise ValueError("Scores cannot be empty")
-
- if depth == height:
- return scores[node_index]
-
- return (
- max(
- minimax(depth + 1, node_index * 2, False, scores, height),
- minimax(depth + 1, node_index * 2 + 1, False, scores, height),
- )
- if is_max
- else min(
- minimax(depth + 1, node_index * 2, True, scores, height),
- minimax(depth + 1, node_index * 2 + 1, True, scores, height),
- )
- )
-
-
-def main() -> None:
- scores = [90, 23, 6, 33, 21, 65, 123, 34423]
- height = math.log(len(scores), 2)
- print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()
- main()
| {"golden_diff": "diff --git a/backtracking/minmax.py b/backtracking/minmax.py\ndeleted file mode 100644\n--- a/backtracking/minmax.py\n+++ /dev/null\n@@ -1,69 +0,0 @@\n-\"\"\"\n-Minimax helps to achieve maximum score in a game by checking all possible moves.\n-\n-\"\"\"\n-from __future__ import annotations\n-\n-import math\n-\n-\n-def minimax(\n- depth: int, node_index: int, is_max: bool, scores: list[int], height: float\n-) -> int:\n- \"\"\"\n- depth is current depth in game tree.\n- node_index is index of current node in scores[].\n- scores[] contains the leaves of game tree.\n- height is maximum height of game tree.\n-\n- >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n- >>> height = math.log(len(scores), 2)\n- >>> minimax(0, 0, True, scores, height)\n- 65\n- >>> minimax(-1, 0, True, scores, height)\n- Traceback (most recent call last):\n- ...\n- ValueError: Depth cannot be less than 0\n- >>> minimax(0, 0, True, [], 2)\n- Traceback (most recent call last):\n- ...\n- ValueError: Scores cannot be empty\n- >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]\n- >>> height = math.log(len(scores), 2)\n- >>> minimax(0, 0, True, scores, height)\n- 12\n- \"\"\"\n-\n- if depth < 0:\n- raise ValueError(\"Depth cannot be less than 0\")\n-\n- if not scores:\n- raise ValueError(\"Scores cannot be empty\")\n-\n- if depth == height:\n- return scores[node_index]\n-\n- return (\n- max(\n- minimax(depth + 1, node_index * 2, False, scores, height),\n- minimax(depth + 1, node_index * 2 + 1, False, scores, height),\n- )\n- if is_max\n- else min(\n- minimax(depth + 1, node_index * 2, True, scores, height),\n- minimax(depth + 1, node_index * 2 + 1, True, scores, height),\n- )\n- )\n-\n-\n-def main() -> None:\n- scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n- height = math.log(len(scores), 2)\n- print(f\"Optimal value : {minimax(0, 0, True, scores, height)}\")\n-\n-\n-if __name__ == \"__main__\":\n- import doctest\n-\n- doctest.testmod()\n- main()\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. 
All these should be moved into one file\n", "before_files": [{"content": "\"\"\"\nMinimax helps to achieve maximum score in a game by checking all possible moves.\n\n\"\"\"\nfrom __future__ import annotations\n\nimport math\n\n\ndef minimax(\n depth: int, node_index: int, is_max: bool, scores: list[int], height: float\n) -> int:\n \"\"\"\n depth is current depth in game tree.\n node_index is index of current node in scores[].\n scores[] contains the leaves of game tree.\n height is maximum height of game tree.\n\n >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n >>> height = math.log(len(scores), 2)\n >>> minimax(0, 0, True, scores, height)\n 65\n >>> minimax(-1, 0, True, scores, height)\n Traceback (most recent call last):\n ...\n ValueError: Depth cannot be less than 0\n >>> minimax(0, 0, True, [], 2)\n Traceback (most recent call last):\n ...\n ValueError: Scores cannot be empty\n >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]\n >>> height = math.log(len(scores), 2)\n >>> minimax(0, 0, True, scores, height)\n 12\n \"\"\"\n\n if depth < 0:\n raise ValueError(\"Depth cannot be less than 0\")\n\n if not scores:\n raise ValueError(\"Scores cannot be empty\")\n\n if depth == height:\n return scores[node_index]\n\n return (\n max(\n minimax(depth + 1, node_index * 2, False, scores, height),\n minimax(depth + 1, node_index * 2 + 1, False, scores, height),\n )\n if is_max\n else min(\n minimax(depth + 1, node_index * 2, True, scores, height),\n minimax(depth + 1, node_index * 2 + 1, True, scores, height),\n )\n )\n\n\ndef main() -> None:\n scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n height = math.log(len(scores), 2)\n print(f\"Optimal value : {minimax(0, 0, True, scores, height)}\")\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n main()\n", "path": "backtracking/minmax.py"}], "after_files": [{"content": null, "path": "backtracking/minmax.py"}]} | 1,013 | 680 |
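A quick way to sanity-check the behaviour being consolidated is to replay the removed module's own doctests. A minimal standalone sketch, assuming the `minimax` function defined in the file above is in scope (pasted into a session or imported from wherever the consolidated implementation ends up):

```python
import math

# Leaves of the game tree and its height, exactly as in the module's doctests.
scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores), 2)

# Maximising player moves first at the root (node_index=0, depth=0).
assert minimax(0, 0, True, scores, height) == 65

# Second doctest case from the module.
scores = [3, 5, 2, 9, 12, 5, 23, 23]
assert minimax(0, 0, True, scores, math.log(len(scores), 2)) == 12
```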
gh_patches_debug_61784 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Map Uruguay's "unknown" to "oil"
For Uruguay, everything that is reported as "thermal" is currently assigned to the "unknown" category.

**There are some hints this is entirely based on "oil" as fuel.**
1. the IEA yearly reports show oil as the only possible source for "thermal" https://www.iea.org/statistics/statisticssearch/report/?country=URUGUAY&product=electricityandheat&year=2015
2. Based on the "list of power stations" https://en.wikipedia.org/wiki/List_of_power_stations_in_Uruguay,
I found the information given under point 3 below.
3. going through the list, I always ended up on http://globalenergyobservatory.org/, reporting "fuel oil" or "distillate oil" as primary fuel:
3.1 http://globalenergyobservatory.org/geoid/42286
3.2 http://globalenergyobservatory.org/geoid/42287
3.3 http://globalenergyobservatory.org/geoid/42290
3.4 http://globalenergyobservatory.org/geoid/42291
If that's enough evidence, let's change it in the parser! 😃
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/UY.py`
Content:
```
1 #!/usr/bin/python3
2
3 import arrow
4 import dateutil
5 import re
6 import requests
7
8 # BeautifulSoup is used to parse HTML to get information
9 from bs4 import BeautifulSoup
10
11 tz = 'America/Montevideo'
12
13 MAP_GENERATION = {
14 'Hidráulica': 'hydro',
15 'Eólica': 'wind',
16 'Fotovoltaica': 'solar',
17 'Biomasa': 'biomass',
18 'Térmica': 'unknown'
19 }
20 INV_MAP_GENERATION = dict([(v, k) for (k, v) in MAP_GENERATION.items()])
21
22 SALTO_GRANDE_URL = 'http://www.cammesa.com/uflujpot.nsf/FlujoW?OpenAgent&Tensiones y Flujos de Potencia&'
23
24
25 def get_salto_grande(session):
26 """
27 Finds the current generation from the Salto Grande Dam that is
28 allocated to Uruguay.
29 """
30
31 current_time = arrow.now('UTC-3')
32 if current_time.minute < 30:
33 # Data for current hour seems to be available after 30mins.
34 current_time = current_time.shift(hours=-1)
35 lookup_time = current_time.floor('hour').format('DD/MM/YYYY HH:mm')
36
37 s = session or requests.Session()
38 url = SALTO_GRANDE_URL + lookup_time
39 response = s.get(url)
40 soup = BeautifulSoup(response.text, 'html.parser')
41
42 tie = soup.find("div", style = "position:absolute; top:143; left:597")
43 generation = float(tie.text)
44
45 return generation
46
47
48 def parse_page(session):
49 r = session or requests.session()
50 url = 'http://www.ute.com.uy/SgePublico/ConsPotenciaGeneracionArbolXFuente.aspx'
51 response = requests.get(url)
52 soup = BeautifulSoup(response.text, 'html.parser')
53
54 datefield = soup.find('span', attrs={'id': 'ctl00_ContentPlaceHolder1_lblUltFecScada'})
55 datestr = re.findall('\d\d/\d\d/\d\d\d\d \d+:\d\d', str(datefield.contents[0]))[0]
56 date = arrow.get(datestr, 'DD/MM/YYYY h:mm').replace(tzinfo=dateutil.tz.gettz(tz))
57
58 table = soup.find('table', attrs={'id': 'ctl00_ContentPlaceHolder1_gridPotenciasNivel1'})
59
60 obj = {
61 'datetime': date.datetime
62 }
63
64 for tr in table.find_all('tr'):
65 tds = tr.find_all('td')
66 if not len(tds): continue
67
68 key = tds[0].find_all('b')
69 # Go back one level up if the b tag is not there
70 if not len(key): key = tds[0].find_all('font')
71 k = key[0].contents[0]
72
73 value = tds[1].find_all('b')
74 # Go back one level up if the b tag is not there
75 if not len(value): value = tds[1].find_all('font')
76 v_str = value[0].contents[0]
77 if v_str.find(',') > -1 and v_str.find('.') > -1:
78 # there can be values like "1.012,5"
79 v_str = v_str.replace('.', '')
80 v_str = v_str.replace(',', '.')
81 else:
82 # just replace decimal separator, like "125,2"
83 v_str = v_str.replace(',', '.')
84 v = float(v_str)
85
86 # solar reports -0.1 at night, make it at least 0
87 v = max(v, 0)
88
89 obj[k] = v
90
91 # https://github.com/tmrowco/electricitymap/issues/1325#issuecomment-380453296
92 salto_grande = get_salto_grande(session)
93 obj['Hidráulica'] = obj.get('Hidráulica', 0.0) + salto_grande
94
95 return obj
96
97
98 def fetch_production(zone_key='UY', session=None, target_datetime=None, logger=None):
99 if target_datetime:
100 raise NotImplementedError('This parser is not yet able to parse past dates')
101
102 obj = parse_page(session)
103
104 data = {
105 'zoneKey': zone_key,
106 'datetime': obj['datetime'],
107 'production': dict([(k, obj[INV_MAP_GENERATION[k]]) for k in INV_MAP_GENERATION.keys()]),
108 'source': 'ute.com.uy'
109 }
110
111 return data
112
113
114 def fetch_exchange(zone_key1='UY', zone_key2='BR-S', session=None, target_datetime=None, logger=None):
115 """Requests the last known power exchange (in MW) between two countries
116
117 Arguments:
118 zone_key (optional) -- used in case a parser is able to fetch multiple countries
119 session (optional) -- request session passed in order to re-use an existing session
120
121 Return:
122 A dictionary in the form:
123 {
124 'sortedZoneKeys': 'DK->NO',
125 'datetime': '2017-01-01T00:00:00Z',
126 'netFlow': 0.0,
127 'source': 'mysource.com'
128 }
129 """
130 if target_datetime:
131 raise NotImplementedError('This parser is not yet able to parse past dates')
132
133 # set comparison
134 if {zone_key1, zone_key2} != {'UY', 'BR'}:
135 return None
136
137 obj = parse_page(session)
138 netFlow = obj['Interconexión con Brasil'] # this represents BR->UY (imports)
139 if zone_key1 != 'BR': netFlow *= -1
140
141 data = {
142 'sortedZoneKeys': '->'.join(sorted([zone_key1, zone_key2])),
143 'datetime': obj['datetime'],
144 'netFlow': netFlow,
145 'source': 'ute.com.uy'
146 }
147
148 return data
149
150
151 if __name__ == '__main__':
152 print('fetch_production() ->')
153 print(fetch_production())
154 print('fetch_exchange(UY, BR) ->')
155 print(fetch_exchange('UY', 'BR'))
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/UY.py b/parsers/UY.py
--- a/parsers/UY.py
+++ b/parsers/UY.py
@@ -15,7 +15,7 @@
'Eólica': 'wind',
'Fotovoltaica': 'solar',
'Biomasa': 'biomass',
- 'Térmica': 'unknown'
+ 'Térmica': 'oil'
}
INV_MAP_GENERATION = dict([(v, k) for (k, v) in MAP_GENERATION.items()])
| {"golden_diff": "diff --git a/parsers/UY.py b/parsers/UY.py\n--- a/parsers/UY.py\n+++ b/parsers/UY.py\n@@ -15,7 +15,7 @@\n 'E\u00f3lica': 'wind',\n 'Fotovoltaica': 'solar',\n 'Biomasa': 'biomass',\n- 'T\u00e9rmica': 'unknown'\n+ 'T\u00e9rmica': 'oil'\n }\n INV_MAP_GENERATION = dict([(v, k) for (k, v) in MAP_GENERATION.items()])\n", "issue": "Map Uruguay's \"unknown\" to \"oil\"\nFor Uruguay, everything that is reported as \"thermal\" is currently assigned to \"unknown\" category.\r\n\r\n\r\n\r\n**There are some hints this is entirely based on \"oil\" as fuel.**\r\n\r\n1. the IEA yearly reports show oil as only possible source for \"thermal\" https://www.iea.org/statistics/statisticssearch/report/?country=URUGUAY&product=electricityandheat&year=2015\r\n\r\n2. Based on the \"list of power stations\" https://en.wikipedia.org/wiki/List_of_power_stations_in_Uruguay,\r\nI found info in 3.\r\n\r\n3. going through the list, I always ended up on http://globalenergyobservatory.org/, reporting \"fuel oil\" or \"distillate oil\" as primary fuel:\r\n3.1 http://globalenergyobservatory.org/geoid/42286\r\n3.2 http://globalenergyobservatory.org/geoid/42287\r\n3.3 http://globalenergyobservatory.org/geoid/42290\r\n3.4 http://globalenergyobservatory.org/geoid/42291\r\n\r\nIf that's enough evidence, let's change it in the parser! \ud83d\ude03\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport arrow\nimport dateutil\nimport re\nimport requests\n\n# BeautifulSoup is used to parse HTML to get information\nfrom bs4 import BeautifulSoup\n\ntz = 'America/Montevideo'\n\nMAP_GENERATION = {\n 'Hidr\u00e1ulica': 'hydro',\n 'E\u00f3lica': 'wind',\n 'Fotovoltaica': 'solar',\n 'Biomasa': 'biomass',\n 'T\u00e9rmica': 'unknown'\n}\nINV_MAP_GENERATION = dict([(v, k) for (k, v) in MAP_GENERATION.items()])\n\nSALTO_GRANDE_URL = 'http://www.cammesa.com/uflujpot.nsf/FlujoW?OpenAgent&Tensiones y Flujos de Potencia&'\n\n\ndef get_salto_grande(session):\n \"\"\"\n Finds the current generation from the Salto Grande Dam that is\n allocated to Uruguay.\n \"\"\"\n\n current_time = arrow.now('UTC-3')\n if current_time.minute < 30:\n # Data for current hour seems to be available after 30mins.\n current_time = current_time.shift(hours=-1)\n lookup_time = current_time.floor('hour').format('DD/MM/YYYY HH:mm')\n\n s = session or requests.Session()\n url = SALTO_GRANDE_URL + lookup_time\n response = s.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n tie = soup.find(\"div\", style = \"position:absolute; top:143; left:597\")\n generation = float(tie.text)\n\n return generation\n\n\ndef parse_page(session):\n r = session or requests.session()\n url = 'http://www.ute.com.uy/SgePublico/ConsPotenciaGeneracionArbolXFuente.aspx'\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n datefield = soup.find('span', attrs={'id': 'ctl00_ContentPlaceHolder1_lblUltFecScada'})\n datestr = re.findall('\\d\\d/\\d\\d/\\d\\d\\d\\d \\d+:\\d\\d', str(datefield.contents[0]))[0]\n date = arrow.get(datestr, 'DD/MM/YYYY h:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n table = soup.find('table', attrs={'id': 'ctl00_ContentPlaceHolder1_gridPotenciasNivel1'})\n\n obj = {\n 'datetime': date.datetime\n }\n\n for tr in table.find_all('tr'):\n tds = tr.find_all('td')\n if not len(tds): continue\n\n key = tds[0].find_all('b')\n # Go back one level up if the b tag is not there\n if not len(key): key = tds[0].find_all('font')\n k = key[0].contents[0]\n\n value = tds[1].find_all('b')\n # Go back one level up if the b 
tag is not there\n if not len(value): value = tds[1].find_all('font')\n v_str = value[0].contents[0]\n if v_str.find(',') > -1 and v_str.find('.') > -1:\n # there can be values like \"1.012,5\"\n v_str = v_str.replace('.', '')\n v_str = v_str.replace(',', '.')\n else:\n # just replace decimal separator, like \"125,2\"\n v_str = v_str.replace(',', '.')\n v = float(v_str)\n\n # solar reports -0.1 at night, make it at least 0\n v = max(v, 0)\n\n obj[k] = v\n\n # https://github.com/tmrowco/electricitymap/issues/1325#issuecomment-380453296\n salto_grande = get_salto_grande(session)\n obj['Hidr\u00e1ulica'] = obj.get('Hidr\u00e1ulica', 0.0) + salto_grande\n\n return obj\n\n\ndef fetch_production(zone_key='UY', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n obj = parse_page(session)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': obj['datetime'],\n 'production': dict([(k, obj[INV_MAP_GENERATION[k]]) for k in INV_MAP_GENERATION.keys()]),\n 'source': 'ute.com.uy'\n }\n\n return data\n\n\ndef fetch_exchange(zone_key1='UY', zone_key2='BR-S', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n # set comparison\n if {zone_key1, zone_key2} != {'UY', 'BR'}:\n return None\n\n obj = parse_page(session)\n netFlow = obj['Interconexi\u00f3n con Brasil'] # this represents BR->UY (imports)\n if zone_key1 != 'BR': netFlow *= -1\n\n data = {\n 'sortedZoneKeys': '->'.join(sorted([zone_key1, zone_key2])),\n 'datetime': obj['datetime'],\n 'netFlow': netFlow,\n 'source': 'ute.com.uy'\n }\n\n return data\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(UY, BR) ->')\n print(fetch_exchange('UY', 'BR'))\n", "path": "parsers/UY.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport arrow\nimport dateutil\nimport re\nimport requests\n\n# BeautifulSoup is used to parse HTML to get information\nfrom bs4 import BeautifulSoup\n\ntz = 'America/Montevideo'\n\nMAP_GENERATION = {\n 'Hidr\u00e1ulica': 'hydro',\n 'E\u00f3lica': 'wind',\n 'Fotovoltaica': 'solar',\n 'Biomasa': 'biomass',\n 'T\u00e9rmica': 'oil'\n}\nINV_MAP_GENERATION = dict([(v, k) for (k, v) in MAP_GENERATION.items()])\n\nSALTO_GRANDE_URL = 'http://www.cammesa.com/uflujpot.nsf/FlujoW?OpenAgent&Tensiones y Flujos de Potencia&'\n\n\ndef get_salto_grande(session):\n \"\"\"\n Finds the current generation from the Salto Grande Dam that is\n allocated to Uruguay.\n \"\"\"\n\n current_time = arrow.now('UTC-3')\n if current_time.minute < 30:\n # Data for current hour seems to be available after 30mins.\n current_time = current_time.shift(hours=-1)\n lookup_time = current_time.floor('hour').format('DD/MM/YYYY HH:mm')\n\n s = session or requests.Session()\n url = SALTO_GRANDE_URL + lookup_time\n response = s.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n tie = soup.find(\"div\", style = \"position:absolute; top:143; left:597\")\n generation = 
float(tie.text)\n\n return generation\n\n\ndef parse_page(session):\n r = session or requests.session()\n url = 'http://www.ute.com.uy/SgePublico/ConsPotenciaGeneracionArbolXFuente.aspx'\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n datefield = soup.find('span', attrs={'id': 'ctl00_ContentPlaceHolder1_lblUltFecScada'})\n datestr = re.findall('\\d\\d/\\d\\d/\\d\\d\\d\\d \\d+:\\d\\d', str(datefield.contents[0]))[0]\n date = arrow.get(datestr, 'DD/MM/YYYY h:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n table = soup.find('table', attrs={'id': 'ctl00_ContentPlaceHolder1_gridPotenciasNivel1'})\n\n obj = {\n 'datetime': date.datetime\n }\n\n for tr in table.find_all('tr'):\n tds = tr.find_all('td')\n if not len(tds): continue\n\n key = tds[0].find_all('b')\n # Go back one level up if the b tag is not there\n if not len(key): key = tds[0].find_all('font')\n k = key[0].contents[0]\n\n value = tds[1].find_all('b')\n # Go back one level up if the b tag is not there\n if not len(value): value = tds[1].find_all('font')\n v_str = value[0].contents[0]\n if v_str.find(',') > -1 and v_str.find('.') > -1:\n # there can be values like \"1.012,5\"\n v_str = v_str.replace('.', '')\n v_str = v_str.replace(',', '.')\n else:\n # just replace decimal separator, like \"125,2\"\n v_str = v_str.replace(',', '.')\n v = float(v_str)\n\n # solar reports -0.1 at night, make it at least 0\n v = max(v, 0)\n\n obj[k] = v\n\n # https://github.com/tmrowco/electricitymap/issues/1325#issuecomment-380453296\n salto_grande = get_salto_grande(session)\n obj['Hidr\u00e1ulica'] = obj.get('Hidr\u00e1ulica', 0.0) + salto_grande\n\n return obj\n\n\ndef fetch_production(zone_key='UY', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n obj = parse_page(session)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': obj['datetime'],\n 'production': dict([(k, obj[INV_MAP_GENERATION[k]]) for k in INV_MAP_GENERATION.keys()]),\n 'source': 'ute.com.uy'\n }\n\n return data\n\n\ndef fetch_exchange(zone_key1='UY', zone_key2='BR-S', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n # set comparison\n if {zone_key1, zone_key2} != {'UY', 'BR'}:\n return None\n\n obj = parse_page(session)\n netFlow = obj['Interconexi\u00f3n con Brasil'] # this represents BR->UY (imports)\n if zone_key1 != 'BR': netFlow *= -1\n\n data = {\n 'sortedZoneKeys': '->'.join(sorted([zone_key1, zone_key2])),\n 'datetime': obj['datetime'],\n 'netFlow': netFlow,\n 'source': 'ute.com.uy'\n }\n\n return data\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(UY, BR) ->')\n print(fetch_exchange('UY', 'BR'))\n", "path": "parsers/UY.py"}]} | 2,309 | 120 |
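The effect of the patch above on the parser output can be summarised in a short sketch (illustrative only — the numbers are made up, and the real values come from ute.com.uy at runtime):

```python
# After the patch, UTE's "Térmica" category is attributed to oil instead of unknown.
MAP_GENERATION = {
    'Hidráulica': 'hydro',
    'Eólica': 'wind',
    'Fotovoltaica': 'solar',
    'Biomasa': 'biomass',
    'Térmica': 'oil',  # was 'unknown' before the patch
}

# fetch_production('UY') therefore returns a production dict keyed by
# 'hydro', 'wind', 'solar', 'biomass' and 'oil' (no 'unknown' entry), e.g.
# {'hydro': 812.3, 'wind': 410.0, 'solar': 25.1, 'biomass': 90.4, 'oil': 55.0}
```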
gh_patches_debug_11098 | rasdani/github-patches | git_diff | Qiskit__qiskit-2416 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rep_times should be integer and not float in provider models
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
- **Qiskit Terra version**:
- **Python version**:
- **Operating system**:
### What is the current behavior?
Marshmallow provider models currently expect float values for rep_times.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/providers/models/backendconfiguration.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2018.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Model and schema for backend configuration."""
16
17 from marshmallow.validate import Length, OneOf, Range, Regexp
18
19 from qiskit.validation import BaseModel, BaseSchema, bind_schema
20 from qiskit.validation.fields import (Boolean, DateTime, Integer, List, Nested, String,
21 Complex, Float, Dict, InstructionParameter)
22 from qiskit.validation.validate import PatternProperties
23
24
25 class GateConfigSchema(BaseSchema):
26 """Schema for GateConfig."""
27
28 # Required properties.
29 name = String(required=True)
30 parameters = List(String(), required=True)
31 qasm_def = String(required=True)
32
33 # Optional properties.
34 coupling_map = List(List(Integer(),
35 validate=Length(min=1)),
36 validate=Length(min=1))
37 latency_map = List(List(Integer(validate=OneOf([0, 1])),
38 validate=Length(min=1)),
39 validate=Length(min=1))
40 conditional = Boolean()
41 description = String()
42
43
44 class UchannelLOSchema(BaseSchema):
45 """Schema for uchannel LO."""
46
47 # Required properties.
48 q = Integer(required=True, validate=Range(min=0))
49 scale = Complex(required=True)
50
51 # Optional properties.
52
53
54 class PulseHamiltonianSchema(BaseSchema):
55 """Schema for PulseHamiltonian."""
56
57 # Required properties.
58 h_str = List(String(), validate=Length(min=1), required=True)
59 dim_osc = List(Integer(validate=Range(min=1)), required=True)
60 dim_qub = List(Integer(validate=Range(min=2)), required=True)
61 vars = Dict(validate=PatternProperties({
62 Regexp('^([a-z0-9])+$'): InstructionParameter()
63 }), required=True)
64
65 # Optional properties.
66
67
68 class BackendConfigurationSchema(BaseSchema):
69 """Schema for BackendConfiguration."""
70 # Required properties.
71 backend_name = String(required=True)
72 backend_version = String(required=True,
73 validate=Regexp("[0-9]+.[0-9]+.[0-9]+$"))
74 n_qubits = Integer(required=True, validate=Range(min=1))
75 basis_gates = List(String(), required=True,
76 validate=Length(min=1))
77 gates = Nested(GateConfigSchema, required=True, many=True,
78 validate=Length(min=1))
79 local = Boolean(required=True)
80 simulator = Boolean(required=True)
81 conditional = Boolean(required=True)
82 open_pulse = Boolean(required=True)
83 memory = Boolean(required=True)
84 max_shots = Integer(required=True, validate=Range(min=1))
85
86 # Optional properties.
87 max_experiments = Integer(validate=Range(min=1))
88 sample_name = String()
89 coupling_map = List(List(Integer(),
90 validate=Length(min=1)),
91 validate=Length(min=1), allow_none=True)
92 n_registers = Integer(validate=Range(min=1))
93 register_map = List(List(Integer(validate=OneOf([0, 1])),
94 validate=Length(min=1)),
95 validate=Length(min=1))
96 configurable = Boolean()
97 credits_required = Boolean()
98 online_date = DateTime()
99 display_name = String()
100 description = String()
101 tags = List(String())
102
103
104 class QasmBackendConfigurationSchema(BackendConfigurationSchema):
105 """Schema for Qasm backend."""
106 open_pulse = Boolean(required=True, validate=OneOf([False]))
107
108
109 class PulseBackendConfigurationSchema(QasmBackendConfigurationSchema):
110 """Schema for pulse backend"""
111 # Required properties.
112 open_pulse = Boolean(required=True, validate=OneOf([True]))
113 n_uchannels = Integer(required=True, validate=Range(min=0))
114 u_channel_lo = List(Nested(UchannelLOSchema, validate=Length(min=1),
115 required=True, many=True))
116 meas_levels = List(Integer(), validate=Length(min=1), required=True)
117 qubit_lo_range = List(List(Float(validate=Range(min=0)),
118 validate=Length(equal=2)), required=True)
119 meas_lo_range = List(List(Float(validate=Range(min=0)),
120 validate=Length(equal=2)), required=True)
121 dt = Float(required=True, validate=Range(min=0)) # pylint: disable=invalid-name
122 dtm = Float(required=True, validate=Range(min=0))
123 rep_times = List(Float(validate=Range(min=0)), required=True)
124 meas_kernels = List(String(), required=True)
125 discriminators = List(String(), required=True)
126
127 # Optional properties.
128 meas_map = List(List(Integer(), validate=Length(min=1)))
129 channel_bandwidth = List(List(Float(), validate=Length(equal=2)))
130 acquisition_latency = List(List(Integer()))
131 conditional_latency = List(List(Integer()))
132 hamiltonian = PulseHamiltonianSchema()
133
134
135 @bind_schema(GateConfigSchema)
136 class GateConfig(BaseModel):
137 """Model for GateConfig.
138
139 Please note that this class only describes the required fields. For the
140 full description of the model, please check ``GateConfigSchema``.
141
142 Attributes:
143 name (str): the gate name as it will be referred to in Qasm.
144 parameters (list[str]): variable names for the gate parameters (if any).
145 qasm_def (str): definition of this gate in terms of Qasm primitives U
146 and CX.
147 """
148
149 def __init__(self, name, parameters, qasm_def, **kwargs):
150 self.name = name
151 self.parameters = parameters
152 self.qasm_def = qasm_def
153
154 super().__init__(**kwargs)
155
156
157 @bind_schema(UchannelLOSchema)
158 class UchannelLO(BaseModel):
159 """Model for GateConfig.
160
161 Please note that this class only describes the required fields. For the
162 full description of the model, please check ``GateConfigSchema``.
163
164 Attributes:
165 q (int): Qubit that scale corresponds too.
166 scale (complex): Scale factor for qubit frequency.
167 """
168 def __init__(self, q, scale, **kwargs):
169
170 self.q = q
171 self.scale = scale
172
173 super().__init__(q=q, scale=scale, **kwargs)
174
175
176 @bind_schema(BackendConfigurationSchema)
177 class BackendConfiguration(BaseModel):
178 """Model for BackendConfiguration.
179
180 Please note that this class only describes the required fields. For the
181 full description of the model, please check ``BackendConfigurationSchema``.
182 Attributes:
183 backend_name (str): backend name.
184 backend_version (str): backend version in the form X.Y.Z.
185 n_qubits (int): number of qubits.
186 basis_gates (list[str]): list of basis gates names on the backend.
187 gates (GateConfig): list of basis gates on the backend.
188 local (bool): backend is local or remote.
189 simulator (bool): backend is a simulator.
190 conditional (bool): backend supports conditional operations.
191 open_pulse (bool): backend supports open pulse.
192 memory (bool): backend supports memory.
193 max_shots (int): maximum number of shots supported.
194 """
195
196 def __init__(self, backend_name, backend_version, n_qubits, basis_gates,
197 gates, local, simulator, conditional, open_pulse, memory,
198 max_shots, **kwargs):
199
200 self.backend_name = backend_name
201 self.backend_version = backend_version
202 self.n_qubits = n_qubits
203 self.basis_gates = basis_gates
204 self.gates = gates
205 self.local = local
206 self.simulator = simulator
207 self.conditional = conditional
208 self.open_pulse = open_pulse
209 self.memory = memory
210 self.max_shots = max_shots
211
212 super().__init__(**kwargs)
213
214
215 @bind_schema(QasmBackendConfigurationSchema)
216 class QasmBackendConfiguration(BackendConfiguration):
217 """Model for QasmBackendConfiguration.
218
219 Please note that this class only describes the required fields. For the
220 full description of the model, please check ``QasmBackendConfigurationSchema``.
221 Attributes:
222 backend_name (str): backend name.
223 backend_version (str): backend version in the form X.Y.Z.
224 n_qubits (int): number of qubits.
225 basis_gates (list[str]): list of basis gates names on the backend.
226 gates (GateConfig): list of basis gates on the backend.
227 local (bool): backend is local or remote.
228 simulator (bool): backend is a simulator.
229 conditional (bool): backend supports conditional operations.
230 open_pulse (bool): backend supports open pulse.
231 memory (bool): backend supports memory.
232 max_shots (int): maximum number of shots supported.
233 **kwargs: Optional fields.
234 """
235
236 def __init__(self, backend_name, backend_version, n_qubits, basis_gates,
237 gates, local, simulator, conditional, open_pulse, memory,
238 max_shots, **kwargs):
239
240 super().__init__(backend_name=backend_name, backend_version=backend_version,
241 n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,
242 local=local, simulator=simulator, conditional=conditional,
243 open_pulse=open_pulse, memory=memory, max_shots=max_shots,
244 **kwargs)
245
246
247 @bind_schema(PulseBackendConfigurationSchema)
248 class PulseBackendConfiguration(BackendConfiguration):
249 """Model for PulseBackendConfiguration.
250
251 Please note that this class only describes the required fields. For the
252 full description of the model, please check ``PulseBackendConfigurationSchema``.
253 Attributes:
254 backend_name (str): backend name.
255 backend_version (str): backend version in the form X.Y.Z.
256 n_qubits (int): number of qubits.
257 basis_gates (list[str]): list of basis gates names on the backend.
258 gates (GateConfig): list of basis gates on the backend.
259 local (bool): backend is local or remote.
260 simulator (bool): backend is a simulator.
261 conditional (bool): backend supports conditional operations.
262 open_pulse (bool): backend supports open pulse.
263 memory (bool): backend supports memory.
264 max_shots (int): maximum number of shots supported.
265 n_uchannels (int): Number of u-channels.
266 u_channel_lo (list[UchannelLO]): U-channel relationship on device los.
267 meas_levels (list[int]): Supported measurement levels.
268 qubit_lo_range (list[list[float]]): Qubit lo ranges for each qubit
269 with form (min, max) in GHz.
270 meas_lo_range (list[list[float]]): Measurement lo ranges for each qubit
271 with form (min, max) in GHz.
272 dt (float): Qubit drive channel timestep in nanoseconds.
273 dtm (float): Measurement drive channel timestep in nanoseconds.
274 rep_times (list[float]): Supported repetition times for device in microseconds.
275 meas_kernels (list[str]): Supported measurement kernels.
276 discriminators: Supported discriminators.
277 **kwargs: Optional fields.
278 """
279
280 def __init__(self, backend_name, backend_version, n_qubits, basis_gates,
281 gates, local, simulator, conditional, open_pulse, memory,
282 max_shots, n_uchannels, u_channel_lo, meas_levels,
283 qubit_lo_range, meas_lo_range, dt, dtm, rep_times, meas_kernels,
284 discriminators, **kwargs):
285
286 self.n_uchannels = n_uchannels
287 self.u_channel_lo = u_channel_lo
288 self.meas_levels = meas_levels
289 self.qubit_lo_range = qubit_lo_range
290 self.meas_lo_range = meas_lo_range
291 self.dt = dt # pylint: disable=invalid-name
292 self.dtm = dtm
293 self.rep_times = rep_times
294 self.meas_kernels = meas_kernels
295 self.discriminators = discriminators
296
297 super().__init__(backend_name=backend_name, backend_version=backend_version,
298 n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,
299 local=local, simulator=simulator, conditional=conditional,
300 open_pulse=open_pulse, memory=memory, max_shots=max_shots,
301 n_uchannels=n_uchannels, u_channel_lo=u_channel_lo,
302 meas_levels=meas_levels, qubit_lo_range=qubit_lo_range,
303 meas_lo_range=meas_lo_range, dt=dt, dtm=dtm,
304 rep_times=rep_times, meas_kernels=meas_kernels,
305 discriminators=discriminators, **kwargs)
306
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/providers/models/backendconfiguration.py b/qiskit/providers/models/backendconfiguration.py
--- a/qiskit/providers/models/backendconfiguration.py
+++ b/qiskit/providers/models/backendconfiguration.py
@@ -120,7 +120,7 @@
validate=Length(equal=2)), required=True)
dt = Float(required=True, validate=Range(min=0)) # pylint: disable=invalid-name
dtm = Float(required=True, validate=Range(min=0))
- rep_times = List(Float(validate=Range(min=0)), required=True)
+ rep_times = List(Integer(validate=Range(min=0)), required=True)
meas_kernels = List(String(), required=True)
discriminators = List(String(), required=True)
| {"golden_diff": "diff --git a/qiskit/providers/models/backendconfiguration.py b/qiskit/providers/models/backendconfiguration.py\n--- a/qiskit/providers/models/backendconfiguration.py\n+++ b/qiskit/providers/models/backendconfiguration.py\n@@ -120,7 +120,7 @@\n validate=Length(equal=2)), required=True)\n dt = Float(required=True, validate=Range(min=0)) # pylint: disable=invalid-name\n dtm = Float(required=True, validate=Range(min=0))\n- rep_times = List(Float(validate=Range(min=0)), required=True)\n+ rep_times = List(Integer(validate=Range(min=0)), required=True)\n meas_kernels = List(String(), required=True)\n discriminators = List(String(), required=True)\n", "issue": "rep_times should be integer and not float in provider models\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**:\r\n- **Python version**:\r\n- **Operating system**:\r\n\r\n### What is the current behavior?\r\nMarshmallow provider models expect float as rep_time.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Model and schema for backend configuration.\"\"\"\n\nfrom marshmallow.validate import Length, OneOf, Range, Regexp\n\nfrom qiskit.validation import BaseModel, BaseSchema, bind_schema\nfrom qiskit.validation.fields import (Boolean, DateTime, Integer, List, Nested, String,\n Complex, Float, Dict, InstructionParameter)\nfrom qiskit.validation.validate import PatternProperties\n\n\nclass GateConfigSchema(BaseSchema):\n \"\"\"Schema for GateConfig.\"\"\"\n\n # Required properties.\n name = String(required=True)\n parameters = List(String(), required=True)\n qasm_def = String(required=True)\n\n # Optional properties.\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1))\n latency_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n conditional = Boolean()\n description = String()\n\n\nclass UchannelLOSchema(BaseSchema):\n \"\"\"Schema for uchannel LO.\"\"\"\n\n # Required properties.\n q = Integer(required=True, validate=Range(min=0))\n scale = Complex(required=True)\n\n # Optional properties.\n\n\nclass PulseHamiltonianSchema(BaseSchema):\n \"\"\"Schema for PulseHamiltonian.\"\"\"\n\n # Required properties.\n h_str = List(String(), validate=Length(min=1), required=True)\n dim_osc = List(Integer(validate=Range(min=1)), required=True)\n dim_qub = List(Integer(validate=Range(min=2)), required=True)\n vars = Dict(validate=PatternProperties({\n Regexp('^([a-z0-9])+$'): InstructionParameter()\n }), required=True)\n\n # Optional properties.\n\n\nclass BackendConfigurationSchema(BaseSchema):\n \"\"\"Schema for BackendConfiguration.\"\"\"\n # Required properties.\n backend_name = String(required=True)\n backend_version = String(required=True,\n validate=Regexp(\"[0-9]+.[0-9]+.[0-9]+$\"))\n n_qubits = Integer(required=True, validate=Range(min=1))\n basis_gates = 
List(String(), required=True,\n validate=Length(min=1))\n gates = Nested(GateConfigSchema, required=True, many=True,\n validate=Length(min=1))\n local = Boolean(required=True)\n simulator = Boolean(required=True)\n conditional = Boolean(required=True)\n open_pulse = Boolean(required=True)\n memory = Boolean(required=True)\n max_shots = Integer(required=True, validate=Range(min=1))\n\n # Optional properties.\n max_experiments = Integer(validate=Range(min=1))\n sample_name = String()\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1), allow_none=True)\n n_registers = Integer(validate=Range(min=1))\n register_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n configurable = Boolean()\n credits_required = Boolean()\n online_date = DateTime()\n display_name = String()\n description = String()\n tags = List(String())\n\n\nclass QasmBackendConfigurationSchema(BackendConfigurationSchema):\n \"\"\"Schema for Qasm backend.\"\"\"\n open_pulse = Boolean(required=True, validate=OneOf([False]))\n\n\nclass PulseBackendConfigurationSchema(QasmBackendConfigurationSchema):\n \"\"\"Schema for pulse backend\"\"\"\n # Required properties.\n open_pulse = Boolean(required=True, validate=OneOf([True]))\n n_uchannels = Integer(required=True, validate=Range(min=0))\n u_channel_lo = List(Nested(UchannelLOSchema, validate=Length(min=1),\n required=True, many=True))\n meas_levels = List(Integer(), validate=Length(min=1), required=True)\n qubit_lo_range = List(List(Float(validate=Range(min=0)),\n validate=Length(equal=2)), required=True)\n meas_lo_range = List(List(Float(validate=Range(min=0)),\n validate=Length(equal=2)), required=True)\n dt = Float(required=True, validate=Range(min=0)) # pylint: disable=invalid-name\n dtm = Float(required=True, validate=Range(min=0))\n rep_times = List(Float(validate=Range(min=0)), required=True)\n meas_kernels = List(String(), required=True)\n discriminators = List(String(), required=True)\n\n # Optional properties.\n meas_map = List(List(Integer(), validate=Length(min=1)))\n channel_bandwidth = List(List(Float(), validate=Length(equal=2)))\n acquisition_latency = List(List(Integer()))\n conditional_latency = List(List(Integer()))\n hamiltonian = PulseHamiltonianSchema()\n\n\n@bind_schema(GateConfigSchema)\nclass GateConfig(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n name (str): the gate name as it will be referred to in Qasm.\n parameters (list[str]): variable names for the gate parameters (if any).\n qasm_def (str): definition of this gate in terms of Qasm primitives U\n and CX.\n \"\"\"\n\n def __init__(self, name, parameters, qasm_def, **kwargs):\n self.name = name\n self.parameters = parameters\n self.qasm_def = qasm_def\n\n super().__init__(**kwargs)\n\n\n@bind_schema(UchannelLOSchema)\nclass UchannelLO(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. 
For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n q (int): Qubit that scale corresponds too.\n scale (complex): Scale factor for qubit frequency.\n \"\"\"\n def __init__(self, q, scale, **kwargs):\n\n self.q = q\n self.scale = scale\n\n super().__init__(q=q, scale=scale, **kwargs)\n\n\n@bind_schema(BackendConfigurationSchema)\nclass BackendConfiguration(BaseModel):\n \"\"\"Model for BackendConfiguration.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``BackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n\n self.backend_name = backend_name\n self.backend_version = backend_version\n self.n_qubits = n_qubits\n self.basis_gates = basis_gates\n self.gates = gates\n self.local = local\n self.simulator = simulator\n self.conditional = conditional\n self.open_pulse = open_pulse\n self.memory = memory\n self.max_shots = max_shots\n\n super().__init__(**kwargs)\n\n\n@bind_schema(QasmBackendConfigurationSchema)\nclass QasmBackendConfiguration(BackendConfiguration):\n \"\"\"Model for QasmBackendConfiguration.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``QasmBackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n **kwargs: Optional fields.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n\n super().__init__(backend_name=backend_name, backend_version=backend_version,\n n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,\n local=local, simulator=simulator, conditional=conditional,\n open_pulse=open_pulse, memory=memory, max_shots=max_shots,\n **kwargs)\n\n\n@bind_schema(PulseBackendConfigurationSchema)\nclass PulseBackendConfiguration(BackendConfiguration):\n \"\"\"Model for PulseBackendConfiguration.\n\n Please note that this class only describes the required fields. 
For the\n full description of the model, please check ``PulseBackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n n_uchannels (int): Number of u-channels.\n u_channel_lo (list[UchannelLO]): U-channel relationship on device los.\n meas_levels (list[int]): Supported measurement levels.\n qubit_lo_range (list[list[float]]): Qubit lo ranges for each qubit\n with form (min, max) in GHz.\n meas_lo_range (list[list[float]]): Measurement lo ranges for each qubit\n with form (min, max) in GHz.\n dt (float): Qubit drive channel timestep in nanoseconds.\n dtm (float): Measurement drive channel timestep in nanoseconds.\n rep_times (list[float]): Supported repetition times for device in microseconds.\n meas_kernels (list[str]): Supported measurement kernels.\n discriminators: Supported discriminators.\n **kwargs: Optional fields.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, n_uchannels, u_channel_lo, meas_levels,\n qubit_lo_range, meas_lo_range, dt, dtm, rep_times, meas_kernels,\n discriminators, **kwargs):\n\n self.n_uchannels = n_uchannels\n self.u_channel_lo = u_channel_lo\n self.meas_levels = meas_levels\n self.qubit_lo_range = qubit_lo_range\n self.meas_lo_range = meas_lo_range\n self.dt = dt # pylint: disable=invalid-name\n self.dtm = dtm\n self.rep_times = rep_times\n self.meas_kernels = meas_kernels\n self.discriminators = discriminators\n\n super().__init__(backend_name=backend_name, backend_version=backend_version,\n n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,\n local=local, simulator=simulator, conditional=conditional,\n open_pulse=open_pulse, memory=memory, max_shots=max_shots,\n n_uchannels=n_uchannels, u_channel_lo=u_channel_lo,\n meas_levels=meas_levels, qubit_lo_range=qubit_lo_range,\n meas_lo_range=meas_lo_range, dt=dt, dtm=dtm,\n rep_times=rep_times, meas_kernels=meas_kernels,\n discriminators=discriminators, **kwargs)\n", "path": "qiskit/providers/models/backendconfiguration.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Model and schema for backend configuration.\"\"\"\n\nfrom marshmallow.validate import Length, OneOf, Range, Regexp\n\nfrom qiskit.validation import BaseModel, BaseSchema, bind_schema\nfrom qiskit.validation.fields import (Boolean, DateTime, Integer, List, Nested, String,\n Complex, Float, Dict, InstructionParameter)\nfrom qiskit.validation.validate import PatternProperties\n\n\nclass GateConfigSchema(BaseSchema):\n \"\"\"Schema for GateConfig.\"\"\"\n\n # Required properties.\n name = String(required=True)\n parameters = List(String(), required=True)\n qasm_def = String(required=True)\n\n # Optional properties.\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1))\n latency_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n conditional = Boolean()\n description = String()\n\n\nclass UchannelLOSchema(BaseSchema):\n \"\"\"Schema for uchannel LO.\"\"\"\n\n # Required properties.\n q = Integer(required=True, validate=Range(min=0))\n scale = Complex(required=True)\n\n # Optional properties.\n\n\nclass PulseHamiltonianSchema(BaseSchema):\n \"\"\"Schema for PulseHamiltonian.\"\"\"\n\n # Required properties.\n h_str = List(String(), validate=Length(min=1), required=True)\n dim_osc = List(Integer(validate=Range(min=1)), required=True)\n dim_qub = List(Integer(validate=Range(min=2)), required=True)\n vars = Dict(validate=PatternProperties({\n Regexp('^([a-z0-9])+$'): InstructionParameter()\n }), required=True)\n\n # Optional properties.\n\n\nclass BackendConfigurationSchema(BaseSchema):\n \"\"\"Schema for BackendConfiguration.\"\"\"\n # Required properties.\n backend_name = String(required=True)\n backend_version = String(required=True,\n validate=Regexp(\"[0-9]+.[0-9]+.[0-9]+$\"))\n n_qubits = Integer(required=True, validate=Range(min=1))\n basis_gates = List(String(), required=True,\n validate=Length(min=1))\n gates = Nested(GateConfigSchema, required=True, many=True,\n validate=Length(min=1))\n local = Boolean(required=True)\n simulator = Boolean(required=True)\n conditional = Boolean(required=True)\n open_pulse = Boolean(required=True)\n memory = Boolean(required=True)\n max_shots = Integer(required=True, validate=Range(min=1))\n\n # Optional properties.\n max_experiments = Integer(validate=Range(min=1))\n sample_name = String()\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1), allow_none=True)\n n_registers = Integer(validate=Range(min=1))\n register_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n configurable = Boolean()\n credits_required = Boolean()\n online_date = DateTime()\n display_name = String()\n description = String()\n tags = List(String())\n\n\nclass QasmBackendConfigurationSchema(BackendConfigurationSchema):\n \"\"\"Schema for Qasm backend.\"\"\"\n open_pulse = Boolean(required=True, validate=OneOf([False]))\n\n\nclass PulseBackendConfigurationSchema(QasmBackendConfigurationSchema):\n \"\"\"Schema for pulse backend\"\"\"\n # Required properties.\n open_pulse = Boolean(required=True, validate=OneOf([True]))\n n_uchannels = 
Integer(required=True, validate=Range(min=0))\n u_channel_lo = List(Nested(UchannelLOSchema, validate=Length(min=1),\n required=True, many=True))\n meas_levels = List(Integer(), validate=Length(min=1), required=True)\n qubit_lo_range = List(List(Float(validate=Range(min=0)),\n validate=Length(equal=2)), required=True)\n meas_lo_range = List(List(Float(validate=Range(min=0)),\n validate=Length(equal=2)), required=True)\n dt = Float(required=True, validate=Range(min=0)) # pylint: disable=invalid-name\n dtm = Float(required=True, validate=Range(min=0))\n rep_times = List(Integer(validate=Range(min=0)), required=True)\n meas_kernels = List(String(), required=True)\n discriminators = List(String(), required=True)\n\n # Optional properties.\n meas_map = List(List(Integer(), validate=Length(min=1)))\n channel_bandwidth = List(List(Float(), validate=Length(equal=2)))\n acquisition_latency = List(List(Integer()))\n conditional_latency = List(List(Integer()))\n hamiltonian = PulseHamiltonianSchema()\n\n\n@bind_schema(GateConfigSchema)\nclass GateConfig(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n name (str): the gate name as it will be referred to in Qasm.\n parameters (list[str]): variable names for the gate parameters (if any).\n qasm_def (str): definition of this gate in terms of Qasm primitives U\n and CX.\n \"\"\"\n\n def __init__(self, name, parameters, qasm_def, **kwargs):\n self.name = name\n self.parameters = parameters\n self.qasm_def = qasm_def\n\n super().__init__(**kwargs)\n\n\n@bind_schema(UchannelLOSchema)\nclass UchannelLO(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n q (int): Qubit that scale corresponds too.\n scale (complex): Scale factor for qubit frequency.\n \"\"\"\n def __init__(self, q, scale, **kwargs):\n\n self.q = q\n self.scale = scale\n\n super().__init__(q=q, scale=scale, **kwargs)\n\n\n@bind_schema(BackendConfigurationSchema)\nclass BackendConfiguration(BaseModel):\n \"\"\"Model for BackendConfiguration.\n\n Please note that this class only describes the required fields. 
For the\n full description of the model, please check ``BackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n\n self.backend_name = backend_name\n self.backend_version = backend_version\n self.n_qubits = n_qubits\n self.basis_gates = basis_gates\n self.gates = gates\n self.local = local\n self.simulator = simulator\n self.conditional = conditional\n self.open_pulse = open_pulse\n self.memory = memory\n self.max_shots = max_shots\n\n super().__init__(**kwargs)\n\n\n@bind_schema(QasmBackendConfigurationSchema)\nclass QasmBackendConfiguration(BackendConfiguration):\n \"\"\"Model for QasmBackendConfiguration.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``QasmBackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n **kwargs: Optional fields.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n\n super().__init__(backend_name=backend_name, backend_version=backend_version,\n n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,\n local=local, simulator=simulator, conditional=conditional,\n open_pulse=open_pulse, memory=memory, max_shots=max_shots,\n **kwargs)\n\n\n@bind_schema(PulseBackendConfigurationSchema)\nclass PulseBackendConfiguration(BackendConfiguration):\n \"\"\"Model for PulseBackendConfiguration.\n\n Please note that this class only describes the required fields. 
For the\n full description of the model, please check ``PulseBackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n n_uchannels (int): Number of u-channels.\n u_channel_lo (list[UchannelLO]): U-channel relationship on device los.\n meas_levels (list[int]): Supported measurement levels.\n qubit_lo_range (list[list[float]]): Qubit lo ranges for each qubit\n with form (min, max) in GHz.\n meas_lo_range (list[list[float]]): Measurement lo ranges for each qubit\n with form (min, max) in GHz.\n dt (float): Qubit drive channel timestep in nanoseconds.\n dtm (float): Measurement drive channel timestep in nanoseconds.\n rep_times (list[float]): Supported repetition times for device in microseconds.\n meas_kernels (list[str]): Supported measurement kernels.\n discriminators: Supported discriminators.\n **kwargs: Optional fields.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, n_uchannels, u_channel_lo, meas_levels,\n qubit_lo_range, meas_lo_range, dt, dtm, rep_times, meas_kernels,\n discriminators, **kwargs):\n\n self.n_uchannels = n_uchannels\n self.u_channel_lo = u_channel_lo\n self.meas_levels = meas_levels\n self.qubit_lo_range = qubit_lo_range\n self.meas_lo_range = meas_lo_range\n self.dt = dt # pylint: disable=invalid-name\n self.dtm = dtm\n self.rep_times = rep_times\n self.meas_kernels = meas_kernels\n self.discriminators = discriminators\n\n super().__init__(backend_name=backend_name, backend_version=backend_version,\n n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,\n local=local, simulator=simulator, conditional=conditional,\n open_pulse=open_pulse, memory=memory, max_shots=max_shots,\n n_uchannels=n_uchannels, u_channel_lo=u_channel_lo,\n meas_levels=meas_levels, qubit_lo_range=qubit_lo_range,\n meas_lo_range=meas_lo_range, dt=dt, dtm=dtm,\n rep_times=rep_times, meas_kernels=meas_kernels,\n discriminators=discriminators, **kwargs)\n", "path": "qiskit/providers/models/backendconfiguration.py"}]} | 3,937 | 162 |
gh_patches_debug_12853 | rasdani/github-patches | git_diff | goauthentik__authentik-4780 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LDAP Writeback doesn't work with eDirectory
**Describe the bug**
POST /api/v3/flows/executor/default-password-change/
LDAP change-password writeback is not working with eDirectory; it looks like it trips over the Active Directory pwdProperties check.
** Possible Fix **
Perhaps one could check the Root DSE for the vendorVersion attribute; if it contains eDirectory, as in the value "LDAP Agent for NetIQ eDirectory 9.2.4 (40205.00)", AD password-policy checking could be skipped.
It seems eDirectory supports RFC 3062 for changing passwords over LDAP, as per https://www.netiq.com/documentation/edir88/edir88/data/b1hpzdbr.html
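
A minimal sketch of that idea, assuming an `ldap3` connection like the one the source already holds; the vendorVersion lookup and the eDirectory branch are illustrative assumptions, not existing authentik code:

```python
from ldap3 import Connection


def change_password_portably(connection: Connection, user_dn: str, new_password: str) -> None:
    # Non-standard Root DSE attributes such as vendorVersion end up in
    # connection.server.info.other once the server info has been read.
    vendor = " ".join(connection.server.info.other.get("vendorVersion", []))
    if "eDirectory" in vendor:
        # eDirectory: skip the AD pwdProperties complexity check and use the
        # RFC 3062 password modify extended operation instead.
        connection.extend.standard.modify_password(user_dn, new_password=new_password)
    else:
        # Active Directory path, as the existing writeback code does.
        connection.extend.microsoft.modify_password(user_dn, new_password)
```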
**To Reproduce**
Steps to reproduce the behavior:
1. Set up eDirectory LDAP Sync as a Federation Source and set LDAP Password Writeback
2. Log in as an LDAP-synchronised user
3. Change Password
4. Get a "Something went wrong! Please try again later." error
**Expected behavior**
Change Password is expected to write back to the LDAP Server
**Logs**
Traceback (most recent call last):
File "/authentik/flows/views/executor.py", line 340, in post
stage_response = self.current_stage_view.post(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/flows/stage.py", line 98, in post
if not challenge.is_valid():
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 227, in is_valid
self._validated_data = self.run_validation(self.initial_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 426, in run_validation
value = self.to_internal_value(data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 485, in to_internal_value
validated_value = validate_method(validated_value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/prompt/stage.py", line 151, in password_single_clean
password_validate.send(sender=self, password=value, plan_context=self.plan.context)
File "/usr/local/lib/python3.11/site-packages/django/dispatch/dispatcher.py", line 176, in send
return [
^
File "/usr/local/lib/python3.11/site-packages/django/dispatch/dispatcher.py", line 177, in <listcomp>
(receiver, receiver(signal=self, sender=sender, **named))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/django/signals_handlers.py", line 65, in wrapper
return receiver(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/sources/ldap/signals.py", line 51, in ldap_password_validate
if changer.check_ad_password_complexity_enabled():
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/sources/ldap/password.py", line 77, in check_ad_password_complexity_enabled
pwd_properties = PwdProperties(raw_pwd_properties)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/enum.py", line 717, in __call__
return cls.__new__(cls, value)
^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/enum.py", line 1141, in __new__
raise exc
File "/usr/local/lib/python3.11/enum.py", line 1118, in __new__
result = cls._missing_(value)
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/enum.py", line 1387, in _missing_
raise ValueError(
builtins.ValueError: [] is not a valid PwdProperties
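
The last few frames can be reproduced in isolation: eDirectory returns an empty value for pwdProperties, and an empty list is not a valid `IntFlag` member, so the conversion blows up before any policy check runs (standalone repro, not authentik code):

```python
from enum import IntFlag


class PwdProperties(IntFlag):
    DOMAIN_PASSWORD_COMPLEX = 1


PwdProperties(1)       # fine: a normal AD server returns an integer bitmask
try:
    PwdProperties([])  # what an empty pwdProperties attribute turns into
except ValueError as exc:
    print(exc)         # [] is not a valid PwdProperties
```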
**Version and Deployment (please complete the following information):**
- authentik version: [2023.2.2]
- Deployment: docker-compose using stacks on a portainer system
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/ldap/password.py`
Content:
```
1 """Help validate and update passwords in LDAP"""
2 from enum import IntFlag
3 from re import split
4 from typing import Optional
5
6 from ldap3 import BASE
7 from ldap3.core.exceptions import LDAPAttributeError
8 from structlog.stdlib import get_logger
9
10 from authentik.core.models import User
11 from authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME
12 from authentik.sources.ldap.models import LDAPSource
13
14 LOGGER = get_logger()
15
16 NON_ALPHA = r"~!@#$%^&*_-+=`|\(){}[]:;\"'<>,.?/"
17 RE_DISPLAYNAME_SEPARATORS = r",\.–—_\s#\t"
18
19
20 class PwdProperties(IntFlag):
21 """Possible values for the pwdProperties attribute"""
22
23 DOMAIN_PASSWORD_COMPLEX = 1
24 DOMAIN_PASSWORD_NO_ANON_CHANGE = 2
25 DOMAIN_PASSWORD_NO_CLEAR_CHANGE = 4
26 DOMAIN_LOCKOUT_ADMINS = 8
27 DOMAIN_PASSWORD_STORE_CLEARTEXT = 16
28 DOMAIN_REFUSE_PASSWORD_CHANGE = 32
29
30
31 class PasswordCategories(IntFlag):
32 """Password categories as defined by Microsoft, a category can only be counted
33 once, hence intflag."""
34
35 NONE = 0
36 ALPHA_LOWER = 1
37 ALPHA_UPPER = 2
38 ALPHA_OTHER = 4
39 NUMERIC = 8
40 SYMBOL = 16
41
42
43 class LDAPPasswordChanger:
44 """Help validate and update passwords in LDAP"""
45
46 _source: LDAPSource
47
48 def __init__(self, source: LDAPSource) -> None:
49 self._source = source
50
51 def get_domain_root_dn(self) -> str:
52 """Attempt to get root DN via MS specific fields or generic LDAP fields"""
53 info = self._source.connection.server.info
54 if "rootDomainNamingContext" in info.other:
55 return info.other["rootDomainNamingContext"][0]
56 naming_contexts = info.naming_contexts
57 naming_contexts.sort(key=len)
58 return naming_contexts[0]
59
60 def check_ad_password_complexity_enabled(self) -> bool:
61 """Check if DOMAIN_PASSWORD_COMPLEX is enabled"""
62 root_dn = self.get_domain_root_dn()
63 try:
64 root_attrs = self._source.connection.extend.standard.paged_search(
65 search_base=root_dn,
66 search_filter="(objectClass=*)",
67 search_scope=BASE,
68 attributes=["pwdProperties"],
69 )
70 root_attrs = list(root_attrs)[0]
71 except (LDAPAttributeError, KeyError, IndexError):
72 return False
73 raw_pwd_properties = root_attrs.get("attributes", {}).get("pwdProperties", None)
74 if raw_pwd_properties is None:
75 return False
76
77 pwd_properties = PwdProperties(raw_pwd_properties)
78 if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties:
79 return True
80
81 return False
82
83 def change_password(self, user: User, password: str):
84 """Change user's password"""
85 user_dn = user.attributes.get(LDAP_DISTINGUISHED_NAME, None)
86 if not user_dn:
87 LOGGER.info(f"User has no {LDAP_DISTINGUISHED_NAME} set.")
88 return
89 try:
90 self._source.connection.extend.microsoft.modify_password(user_dn, password)
91 except LDAPAttributeError:
92 self._source.connection.extend.standard.modify_password(user_dn, new_password=password)
93
94 def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:
95 """Check if a password contains sAMAccount or displayName"""
96 users = list(
97 self._source.connection.extend.standard.paged_search(
98 search_base=user_dn,
99 search_filter=self._source.user_object_filter,
100 search_scope=BASE,
101 attributes=["displayName", "sAMAccountName"],
102 )
103 )
104 if len(users) != 1:
105 raise AssertionError()
106 user_attributes = users[0]["attributes"]
107 # If sAMAccountName is longer than 3 chars, check if its contained in password
108 if len(user_attributes["sAMAccountName"]) >= 3:
109 if password.lower() in user_attributes["sAMAccountName"].lower():
110 return False
111 # No display name set, can't check any further
112 if len(user_attributes["displayName"]) < 1:
113 return True
114 for display_name in user_attributes["displayName"]:
115 display_name_tokens = split(RE_DISPLAYNAME_SEPARATORS, display_name)
116 for token in display_name_tokens:
117 # Ignore tokens under 3 chars
118 if len(token) < 3:
119 continue
120 if token.lower() in password.lower():
121 return False
122 return True
123
124 def ad_password_complexity(self, password: str, user: Optional[User] = None) -> bool:
125 """Check if password matches Active directory password policies
126
127 https://docs.microsoft.com/en-us/windows/security/threat-protection/
128 security-policy-settings/password-must-meet-complexity-requirements
129 """
130 if user:
131 # Check if password contains sAMAccountName or displayNames
132 if LDAP_DISTINGUISHED_NAME in user.attributes:
133 existing_user_check = self._ad_check_password_existing(
134 password, user.attributes.get(LDAP_DISTINGUISHED_NAME)
135 )
136 if not existing_user_check:
137 LOGGER.debug("Password failed name check", user=user)
138 return existing_user_check
139
140 # Step 2, match at least 3 of 5 categories
141 matched_categories = PasswordCategories.NONE
142 required = 3
143 for letter in password:
144 # Only match one category per letter,
145 if letter.islower():
146 matched_categories |= PasswordCategories.ALPHA_LOWER
147 elif letter.isupper():
148 matched_categories |= PasswordCategories.ALPHA_UPPER
149 elif not letter.isascii() and letter.isalpha():
150 # Not exactly matching microsoft's policy, but count it as "Other unicode" char
151 # when its alpha and not ascii
152 matched_categories |= PasswordCategories.ALPHA_OTHER
153 elif letter.isnumeric():
154 matched_categories |= PasswordCategories.NUMERIC
155 elif letter in NON_ALPHA:
156 matched_categories |= PasswordCategories.SYMBOL
157 if bin(matched_categories).count("1") < required:
158 LOGGER.debug(
159 "Password didn't match enough categories",
160 has=matched_categories,
161 must=required,
162 )
163 return False
164 LOGGER.debug("Password matched categories", has=matched_categories, must=required)
165 return True
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/ldap/password.py b/authentik/sources/ldap/password.py
--- a/authentik/sources/ldap/password.py
+++ b/authentik/sources/ldap/password.py
@@ -71,10 +71,13 @@
except (LDAPAttributeError, KeyError, IndexError):
return False
raw_pwd_properties = root_attrs.get("attributes", {}).get("pwdProperties", None)
- if raw_pwd_properties is None:
+ if not raw_pwd_properties:
return False
- pwd_properties = PwdProperties(raw_pwd_properties)
+ try:
+ pwd_properties = PwdProperties(raw_pwd_properties)
+ except ValueError:
+ return False
if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties:
return True
| {"golden_diff": "diff --git a/authentik/sources/ldap/password.py b/authentik/sources/ldap/password.py\n--- a/authentik/sources/ldap/password.py\n+++ b/authentik/sources/ldap/password.py\n@@ -71,10 +71,13 @@\n except (LDAPAttributeError, KeyError, IndexError):\n return False\n raw_pwd_properties = root_attrs.get(\"attributes\", {}).get(\"pwdProperties\", None)\n- if raw_pwd_properties is None:\n+ if not raw_pwd_properties:\n return False\n \n- pwd_properties = PwdProperties(raw_pwd_properties)\n+ try:\n+ pwd_properties = PwdProperties(raw_pwd_properties)\n+ except ValueError:\n+ return False\n if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties:\n return True\n", "issue": "LDAP Writeback doesn't work with eDirectory\n**Describe the bug**\r\nPOST /api/v3/flows/executor/default-password-change/\r\nLDAP Change Password writeback not working with eDirectory, looks like it's hanging on the Check Active Directory pwdProperties.\r\n\r\n** Possible Fix **\r\nPerhaps one could check the Root DSE for vendorVersion attribute and if it contains eDirectory as per the following value: LDAP Agent for NetIQ eDirectory 9.2.4 (40205.00) it could skip AD Password Policy Checking.\r\nIt seems eDirectory support RFC3062 for LDAP Changing password as per https://www.netiq.com/documentation/edir88/edir88/data/b1hpzdbr.html\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Setup eDirectory LDAP Sync as a Federation Source and set LDAP Password Writeback\r\n2. Login as a LDAP Synchronised user\r\n3. Change Password\r\n4. Get Something went wrong! Please try again later. Error\r\n\r\n**Expected behavior**\r\nChange Password is expected to write back to the LDAP Server\r\n\r\n**Logs**\r\nTraceback (most recent call last):\r\n File \"/authentik/flows/views/executor.py\", line 340, in post\r\n stage_response = self.current_stage_view.post(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/flows/stage.py\", line 98, in post\r\n if not challenge.is_valid():\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 227, in is_valid\r\n self._validated_data = self.run_validation(self.initial_data)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 426, in run_validation\r\n value = self.to_internal_value(data)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 485, in to_internal_value\r\n validated_value = validate_method(validated_value)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/prompt/stage.py\", line 151, in password_single_clean\r\n password_validate.send(sender=self, password=value, plan_context=self.plan.context)\r\n File \"/usr/local/lib/python3.11/site-packages/django/dispatch/dispatcher.py\", line 176, in send\r\n return [\r\n ^\r\n File \"/usr/local/lib/python3.11/site-packages/django/dispatch/dispatcher.py\", line 177, in <listcomp>\r\n (receiver, receiver(signal=self, sender=sender, **named))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/django/signals_handlers.py\", line 65, in wrapper\r\n return receiver(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/sources/ldap/signals.py\", line 51, in ldap_password_validate\r\n if changer.check_ad_password_complexity_enabled():\r\n 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/sources/ldap/password.py\", line 77, in check_ad_password_complexity_enabled\r\n pwd_properties = PwdProperties(raw_pwd_properties)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/enum.py\", line 717, in __call__\r\n return cls.__new__(cls, value)\r\n ^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/enum.py\", line 1141, in __new__\r\n raise exc\r\n File \"/usr/local/lib/python3.11/enum.py\", line 1118, in __new__\r\n result = cls._missing_(value)\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/enum.py\", line 1387, in _missing_\r\n raise ValueError(\r\nbuiltins.ValueError: [] is not a valid PwdProperties\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: [2023.2.2]\r\n - Deployment: docker-compose using stacks on a portainer system\n", "before_files": [{"content": "\"\"\"Help validate and update passwords in LDAP\"\"\"\nfrom enum import IntFlag\nfrom re import split\nfrom typing import Optional\n\nfrom ldap3 import BASE\nfrom ldap3.core.exceptions import LDAPAttributeError\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME\nfrom authentik.sources.ldap.models import LDAPSource\n\nLOGGER = get_logger()\n\nNON_ALPHA = r\"~!@#$%^&*_-+=`|\\(){}[]:;\\\"'<>,.?/\"\nRE_DISPLAYNAME_SEPARATORS = r\",\\.\u2013\u2014_\\s#\\t\"\n\n\nclass PwdProperties(IntFlag):\n \"\"\"Possible values for the pwdProperties attribute\"\"\"\n\n DOMAIN_PASSWORD_COMPLEX = 1\n DOMAIN_PASSWORD_NO_ANON_CHANGE = 2\n DOMAIN_PASSWORD_NO_CLEAR_CHANGE = 4\n DOMAIN_LOCKOUT_ADMINS = 8\n DOMAIN_PASSWORD_STORE_CLEARTEXT = 16\n DOMAIN_REFUSE_PASSWORD_CHANGE = 32\n\n\nclass PasswordCategories(IntFlag):\n \"\"\"Password categories as defined by Microsoft, a category can only be counted\n once, hence intflag.\"\"\"\n\n NONE = 0\n ALPHA_LOWER = 1\n ALPHA_UPPER = 2\n ALPHA_OTHER = 4\n NUMERIC = 8\n SYMBOL = 16\n\n\nclass LDAPPasswordChanger:\n \"\"\"Help validate and update passwords in LDAP\"\"\"\n\n _source: LDAPSource\n\n def __init__(self, source: LDAPSource) -> None:\n self._source = source\n\n def get_domain_root_dn(self) -> str:\n \"\"\"Attempt to get root DN via MS specific fields or generic LDAP fields\"\"\"\n info = self._source.connection.server.info\n if \"rootDomainNamingContext\" in info.other:\n return info.other[\"rootDomainNamingContext\"][0]\n naming_contexts = info.naming_contexts\n naming_contexts.sort(key=len)\n return naming_contexts[0]\n\n def check_ad_password_complexity_enabled(self) -> bool:\n \"\"\"Check if DOMAIN_PASSWORD_COMPLEX is enabled\"\"\"\n root_dn = self.get_domain_root_dn()\n try:\n root_attrs = self._source.connection.extend.standard.paged_search(\n search_base=root_dn,\n search_filter=\"(objectClass=*)\",\n search_scope=BASE,\n attributes=[\"pwdProperties\"],\n )\n root_attrs = list(root_attrs)[0]\n except (LDAPAttributeError, KeyError, IndexError):\n return False\n raw_pwd_properties = root_attrs.get(\"attributes\", {}).get(\"pwdProperties\", None)\n if raw_pwd_properties is None:\n return False\n\n pwd_properties = PwdProperties(raw_pwd_properties)\n if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties:\n return True\n\n return False\n\n def change_password(self, user: User, password: str):\n \"\"\"Change user's password\"\"\"\n user_dn = user.attributes.get(LDAP_DISTINGUISHED_NAME, None)\n if not user_dn:\n 
LOGGER.info(f\"User has no {LDAP_DISTINGUISHED_NAME} set.\")\n return\n try:\n self._source.connection.extend.microsoft.modify_password(user_dn, password)\n except LDAPAttributeError:\n self._source.connection.extend.standard.modify_password(user_dn, new_password=password)\n\n def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:\n \"\"\"Check if a password contains sAMAccount or displayName\"\"\"\n users = list(\n self._source.connection.extend.standard.paged_search(\n search_base=user_dn,\n search_filter=self._source.user_object_filter,\n search_scope=BASE,\n attributes=[\"displayName\", \"sAMAccountName\"],\n )\n )\n if len(users) != 1:\n raise AssertionError()\n user_attributes = users[0][\"attributes\"]\n # If sAMAccountName is longer than 3 chars, check if its contained in password\n if len(user_attributes[\"sAMAccountName\"]) >= 3:\n if password.lower() in user_attributes[\"sAMAccountName\"].lower():\n return False\n # No display name set, can't check any further\n if len(user_attributes[\"displayName\"]) < 1:\n return True\n for display_name in user_attributes[\"displayName\"]:\n display_name_tokens = split(RE_DISPLAYNAME_SEPARATORS, display_name)\n for token in display_name_tokens:\n # Ignore tokens under 3 chars\n if len(token) < 3:\n continue\n if token.lower() in password.lower():\n return False\n return True\n\n def ad_password_complexity(self, password: str, user: Optional[User] = None) -> bool:\n \"\"\"Check if password matches Active directory password policies\n\n https://docs.microsoft.com/en-us/windows/security/threat-protection/\n security-policy-settings/password-must-meet-complexity-requirements\n \"\"\"\n if user:\n # Check if password contains sAMAccountName or displayNames\n if LDAP_DISTINGUISHED_NAME in user.attributes:\n existing_user_check = self._ad_check_password_existing(\n password, user.attributes.get(LDAP_DISTINGUISHED_NAME)\n )\n if not existing_user_check:\n LOGGER.debug(\"Password failed name check\", user=user)\n return existing_user_check\n\n # Step 2, match at least 3 of 5 categories\n matched_categories = PasswordCategories.NONE\n required = 3\n for letter in password:\n # Only match one category per letter,\n if letter.islower():\n matched_categories |= PasswordCategories.ALPHA_LOWER\n elif letter.isupper():\n matched_categories |= PasswordCategories.ALPHA_UPPER\n elif not letter.isascii() and letter.isalpha():\n # Not exactly matching microsoft's policy, but count it as \"Other unicode\" char\n # when its alpha and not ascii\n matched_categories |= PasswordCategories.ALPHA_OTHER\n elif letter.isnumeric():\n matched_categories |= PasswordCategories.NUMERIC\n elif letter in NON_ALPHA:\n matched_categories |= PasswordCategories.SYMBOL\n if bin(matched_categories).count(\"1\") < required:\n LOGGER.debug(\n \"Password didn't match enough categories\",\n has=matched_categories,\n must=required,\n )\n return False\n LOGGER.debug(\"Password matched categories\", has=matched_categories, must=required)\n return True\n", "path": "authentik/sources/ldap/password.py"}], "after_files": [{"content": "\"\"\"Help validate and update passwords in LDAP\"\"\"\nfrom enum import IntFlag\nfrom re import split\nfrom typing import Optional\n\nfrom ldap3 import BASE\nfrom ldap3.core.exceptions import LDAPAttributeError\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME\nfrom authentik.sources.ldap.models import LDAPSource\n\nLOGGER = 
get_logger()\n\nNON_ALPHA = r\"~!@#$%^&*_-+=`|\\(){}[]:;\\\"'<>,.?/\"\nRE_DISPLAYNAME_SEPARATORS = r\",\\.\u2013\u2014_\\s#\\t\"\n\n\nclass PwdProperties(IntFlag):\n \"\"\"Possible values for the pwdProperties attribute\"\"\"\n\n DOMAIN_PASSWORD_COMPLEX = 1\n DOMAIN_PASSWORD_NO_ANON_CHANGE = 2\n DOMAIN_PASSWORD_NO_CLEAR_CHANGE = 4\n DOMAIN_LOCKOUT_ADMINS = 8\n DOMAIN_PASSWORD_STORE_CLEARTEXT = 16\n DOMAIN_REFUSE_PASSWORD_CHANGE = 32\n\n\nclass PasswordCategories(IntFlag):\n \"\"\"Password categories as defined by Microsoft, a category can only be counted\n once, hence intflag.\"\"\"\n\n NONE = 0\n ALPHA_LOWER = 1\n ALPHA_UPPER = 2\n ALPHA_OTHER = 4\n NUMERIC = 8\n SYMBOL = 16\n\n\nclass LDAPPasswordChanger:\n \"\"\"Help validate and update passwords in LDAP\"\"\"\n\n _source: LDAPSource\n\n def __init__(self, source: LDAPSource) -> None:\n self._source = source\n\n def get_domain_root_dn(self) -> str:\n \"\"\"Attempt to get root DN via MS specific fields or generic LDAP fields\"\"\"\n info = self._source.connection.server.info\n if \"rootDomainNamingContext\" in info.other:\n return info.other[\"rootDomainNamingContext\"][0]\n naming_contexts = info.naming_contexts\n naming_contexts.sort(key=len)\n return naming_contexts[0]\n\n def check_ad_password_complexity_enabled(self) -> bool:\n \"\"\"Check if DOMAIN_PASSWORD_COMPLEX is enabled\"\"\"\n root_dn = self.get_domain_root_dn()\n try:\n root_attrs = self._source.connection.extend.standard.paged_search(\n search_base=root_dn,\n search_filter=\"(objectClass=*)\",\n search_scope=BASE,\n attributes=[\"pwdProperties\"],\n )\n root_attrs = list(root_attrs)[0]\n except (LDAPAttributeError, KeyError, IndexError):\n return False\n raw_pwd_properties = root_attrs.get(\"attributes\", {}).get(\"pwdProperties\", None)\n if not raw_pwd_properties:\n return False\n\n try:\n pwd_properties = PwdProperties(raw_pwd_properties)\n except ValueError:\n return False\n if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties:\n return True\n\n return False\n\n def change_password(self, user: User, password: str):\n \"\"\"Change user's password\"\"\"\n user_dn = user.attributes.get(LDAP_DISTINGUISHED_NAME, None)\n if not user_dn:\n LOGGER.info(f\"User has no {LDAP_DISTINGUISHED_NAME} set.\")\n return\n try:\n self._source.connection.extend.microsoft.modify_password(user_dn, password)\n except LDAPAttributeError:\n self._source.connection.extend.standard.modify_password(user_dn, new_password=password)\n\n def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:\n \"\"\"Check if a password contains sAMAccount or displayName\"\"\"\n users = list(\n self._source.connection.extend.standard.paged_search(\n search_base=user_dn,\n search_filter=self._source.user_object_filter,\n search_scope=BASE,\n attributes=[\"displayName\", \"sAMAccountName\"],\n )\n )\n if len(users) != 1:\n raise AssertionError()\n user_attributes = users[0][\"attributes\"]\n # If sAMAccountName is longer than 3 chars, check if its contained in password\n if len(user_attributes[\"sAMAccountName\"]) >= 3:\n if password.lower() in user_attributes[\"sAMAccountName\"].lower():\n return False\n # No display name set, can't check any further\n if len(user_attributes[\"displayName\"]) < 1:\n return True\n for display_name in user_attributes[\"displayName\"]:\n display_name_tokens = split(RE_DISPLAYNAME_SEPARATORS, display_name)\n for token in display_name_tokens:\n # Ignore tokens under 3 chars\n if len(token) < 3:\n continue\n if token.lower() in password.lower():\n return False\n 
return True\n\n def ad_password_complexity(self, password: str, user: Optional[User] = None) -> bool:\n \"\"\"Check if password matches Active directory password policies\n\n https://docs.microsoft.com/en-us/windows/security/threat-protection/\n security-policy-settings/password-must-meet-complexity-requirements\n \"\"\"\n if user:\n # Check if password contains sAMAccountName or displayNames\n if LDAP_DISTINGUISHED_NAME in user.attributes:\n existing_user_check = self._ad_check_password_existing(\n password, user.attributes.get(LDAP_DISTINGUISHED_NAME)\n )\n if not existing_user_check:\n LOGGER.debug(\"Password failed name check\", user=user)\n return existing_user_check\n\n # Step 2, match at least 3 of 5 categories\n matched_categories = PasswordCategories.NONE\n required = 3\n for letter in password:\n # Only match one category per letter,\n if letter.islower():\n matched_categories |= PasswordCategories.ALPHA_LOWER\n elif letter.isupper():\n matched_categories |= PasswordCategories.ALPHA_UPPER\n elif not letter.isascii() and letter.isalpha():\n # Not exactly matching microsoft's policy, but count it as \"Other unicode\" char\n # when its alpha and not ascii\n matched_categories |= PasswordCategories.ALPHA_OTHER\n elif letter.isnumeric():\n matched_categories |= PasswordCategories.NUMERIC\n elif letter in NON_ALPHA:\n matched_categories |= PasswordCategories.SYMBOL\n if bin(matched_categories).count(\"1\") < required:\n LOGGER.debug(\n \"Password didn't match enough categories\",\n has=matched_categories,\n must=required,\n )\n return False\n LOGGER.debug(\"Password matched categories\", has=matched_categories, must=required)\n return True\n", "path": "authentik/sources/ldap/password.py"}]} | 2,986 | 168 |
gh_patches_debug_25689 | rasdani/github-patches | git_diff | searx__searx-542 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
reddit: cannot view image
- search: !re wallpaper dark blue
- select some image
- click "View Image" button
The image should then be shown, but the searx start page is loaded instead. "View Page" does show the image. This is because image == page for these results, which is an inconsistency (of sorts).
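
For context, the engine currently hands the post's `url` field (the image itself) to the result, while reddit's `permalink` field is the relative path of the comments page. Roughly (field names as in the reddit JSON API, example values made up):

```python
from urlparse import urljoin  # Python 2, as in this engine

base_url = 'https://www.reddit.com/'
post = {
    'url': 'https://i.redd.it/example.jpg',             # direct link target (the image)
    'permalink': '/r/wallpaper/comments/abc123/dark/',  # relative comments-page path
}

img_src = post['url']                              # what "View Image" should open
result_url = urljoin(base_url, post['permalink'])  # what "View Page" should open
```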
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/reddit.py`
Content:
```
1 """
2 Reddit
3
4 @website https://www.reddit.com/
5 @provide-api yes (https://www.reddit.com/dev/api)
6
7 @using-api yes
8 @results JSON
9 @stable yes
10 @parse url, title, content, thumbnail, publishedDate
11 """
12
13 import json
14 from cgi import escape
15 from urllib import urlencode
16 from urlparse import urlparse
17 from datetime import datetime
18
19 # engine dependent config
20 categories = ['general', 'images', 'news', 'social media']
21 page_size = 25
22
23 # search-url
24 search_url = 'https://www.reddit.com/search.json?{query}'
25
26
27 # do search-request
28 def request(query, params):
29 query = urlencode({'q': query,
30 'limit': page_size})
31 params['url'] = search_url.format(query=query)
32
33 return params
34
35
36 # get response from search-request
37 def response(resp):
38 img_results = []
39 text_results = []
40
41 search_results = json.loads(resp.text)
42
43 # return empty array if there are no results
44 if 'data' not in search_results:
45 return []
46
47 posts = search_results.get('data', {}).get('children', [])
48
49 # process results
50 for post in posts:
51 data = post['data']
52
53 # extract post information
54 params = {
55 'url': data['url'],
56 'title': data['title']
57 }
58
59 # if thumbnail field contains a valid URL, we need to change template
60 thumbnail = data['thumbnail']
61 url_info = urlparse(thumbnail)
62 # netloc & path
63 if url_info[1] != '' and url_info[2] != '':
64 params['thumbnail_src'] = thumbnail
65 params['template'] = 'images.html'
66 img_results.append(params)
67 else:
68 created = datetime.fromtimestamp(data['created_utc'])
69 content = escape(data['selftext'])
70 if len(content) > 500:
71 content = content[:500] + '...'
72 params['content'] = content
73 params['publishedDate'] = created
74 text_results.append(params)
75
76 # show images first and text results second
77 return img_results + text_results
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py
--- a/searx/engines/reddit.py
+++ b/searx/engines/reddit.py
@@ -13,7 +13,7 @@
import json
from cgi import escape
from urllib import urlencode
-from urlparse import urlparse
+from urlparse import urlparse, urljoin
from datetime import datetime
# engine dependent config
@@ -21,7 +21,8 @@
page_size = 25
# search-url
-search_url = 'https://www.reddit.com/search.json?{query}'
+base_url = 'https://www.reddit.com/'
+search_url = base_url + 'search.json?{query}'
# do search-request
@@ -52,7 +53,7 @@
# extract post information
params = {
- 'url': data['url'],
+ 'url': urljoin(base_url, data['permalink']),
'title': data['title']
}
@@ -61,6 +62,7 @@
url_info = urlparse(thumbnail)
# netloc & path
if url_info[1] != '' and url_info[2] != '':
+ params['img_src'] = data['url']
params['thumbnail_src'] = thumbnail
params['template'] = 'images.html'
img_results.append(params)
| {"golden_diff": "diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py\n--- a/searx/engines/reddit.py\n+++ b/searx/engines/reddit.py\n@@ -13,7 +13,7 @@\n import json\n from cgi import escape\n from urllib import urlencode\n-from urlparse import urlparse\n+from urlparse import urlparse, urljoin\n from datetime import datetime\n \n # engine dependent config\n@@ -21,7 +21,8 @@\n page_size = 25\n \n # search-url\n-search_url = 'https://www.reddit.com/search.json?{query}'\n+base_url = 'https://www.reddit.com/'\n+search_url = base_url + 'search.json?{query}'\n \n \n # do search-request\n@@ -52,7 +53,7 @@\n \n # extract post information\n params = {\n- 'url': data['url'],\n+ 'url': urljoin(base_url, data['permalink']),\n 'title': data['title']\n }\n \n@@ -61,6 +62,7 @@\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n+ params['img_src'] = data['url']\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n", "issue": "reddit: cannot view image\n- search: !re wallpaper dark blue\n- select some image\n- click \"View Image\" button\n\nThen the image should be shown but the searx start page is loaded. \"View Page\" shows the image. This is because image==page but this is an inconsistency (sort of)\n\n", "before_files": [{"content": "\"\"\"\n Reddit\n\n @website https://www.reddit.com/\n @provide-api yes (https://www.reddit.com/dev/api)\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title, content, thumbnail, publishedDate\n\"\"\"\n\nimport json\nfrom cgi import escape\nfrom urllib import urlencode\nfrom urlparse import urlparse\nfrom datetime import datetime\n\n# engine dependent config\ncategories = ['general', 'images', 'news', 'social media']\npage_size = 25\n\n# search-url\nsearch_url = 'https://www.reddit.com/search.json?{query}'\n\n\n# do search-request\ndef request(query, params):\n query = urlencode({'q': query,\n 'limit': page_size})\n params['url'] = search_url.format(query=query)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n img_results = []\n text_results = []\n\n search_results = json.loads(resp.text)\n\n # return empty array if there are no results\n if 'data' not in search_results:\n return []\n\n posts = search_results.get('data', {}).get('children', [])\n\n # process results\n for post in posts:\n data = post['data']\n\n # extract post information\n params = {\n 'url': data['url'],\n 'title': data['title']\n }\n\n # if thumbnail field contains a valid URL, we need to change template\n thumbnail = data['thumbnail']\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n else:\n created = datetime.fromtimestamp(data['created_utc'])\n content = escape(data['selftext'])\n if len(content) > 500:\n content = content[:500] + '...'\n params['content'] = content\n params['publishedDate'] = created\n text_results.append(params)\n\n # show images first and text results second\n return img_results + text_results\n", "path": "searx/engines/reddit.py"}], "after_files": [{"content": "\"\"\"\n Reddit\n\n @website https://www.reddit.com/\n @provide-api yes (https://www.reddit.com/dev/api)\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title, content, thumbnail, publishedDate\n\"\"\"\n\nimport json\nfrom cgi import escape\nfrom urllib import urlencode\nfrom urlparse import urlparse, 
urljoin\nfrom datetime import datetime\n\n# engine dependent config\ncategories = ['general', 'images', 'news', 'social media']\npage_size = 25\n\n# search-url\nbase_url = 'https://www.reddit.com/'\nsearch_url = base_url + 'search.json?{query}'\n\n\n# do search-request\ndef request(query, params):\n query = urlencode({'q': query,\n 'limit': page_size})\n params['url'] = search_url.format(query=query)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n img_results = []\n text_results = []\n\n search_results = json.loads(resp.text)\n\n # return empty array if there are no results\n if 'data' not in search_results:\n return []\n\n posts = search_results.get('data', {}).get('children', [])\n\n # process results\n for post in posts:\n data = post['data']\n\n # extract post information\n params = {\n 'url': urljoin(base_url, data['permalink']),\n 'title': data['title']\n }\n\n # if thumbnail field contains a valid URL, we need to change template\n thumbnail = data['thumbnail']\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n params['img_src'] = data['url']\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n else:\n created = datetime.fromtimestamp(data['created_utc'])\n content = escape(data['selftext'])\n if len(content) > 500:\n content = content[:500] + '...'\n params['content'] = content\n params['publishedDate'] = created\n text_results.append(params)\n\n # show images first and text results second\n return img_results + text_results\n", "path": "searx/engines/reddit.py"}]} | 952 | 304 |
gh_patches_debug_26045 | rasdani/github-patches | git_diff | pypa__pip-9163 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--help output does not redact password from configured index_url
**Environment**
* pip version: 20.2.4
* Python version: 3.8.6
* OS: Windows 10
<!-- Feel free to add more information about your environment here -->
**Description**
The help text for at least the `install` and `wheel` subcommands (likely others too) displays the full configured index URL as a default, without redacting credentials:
```
Package Index Options:
-i, --index-url <url> Base URL of the Python Package Index (default https://us
er:[email protected]/api/pypi/simple). This
should point to a repository compliant with PEP 503 (the
simple repository API) or a local directory laid out in
the same format.
```
The URL is coming from pip.ini in `C:\Users\cdunklau\AppData\Roaming\pip\pip.ini`:
```
[global]
index-url = https://user:[email protected]/api/pypi/simple
```
**Expected behavior**
Pip should redact the password, as it does in log entries involving a custom index URL, e.g.:
```
Package Index Options:
-i, --index-url <url> Base URL of the Python Package Index (default https://us
er:****@mypypi.example.com/api/pypi/simple). This
should point to a repository compliant with PEP 503 (the
simple repository API) or a local directory laid out in
the same format.
```
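
pip already ships a redaction helper (`redact_auth_from_url` in `pip._internal.utils.misc`) that produces this output for log messages; the effect can be sketched with the standard library alone (rough illustration, not pip's actual implementation):

```python
from urllib.parse import urlsplit, urlunsplit

def redact_password(url):
    # Replace the password component of a URL with **** while keeping the username.
    parts = urlsplit(url)
    if parts.password is None:
        return url
    netloc = '{}:****@{}'.format(parts.username, parts.hostname)
    if parts.port:
        netloc += ':{}'.format(parts.port)
    return urlunsplit(parts._replace(netloc=netloc))

print(redact_password('https://user:[email protected]/api/pypi/simple'))
# https://user:****@mypypi.example.com/api/pypi/simple
```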
**How to Reproduce**
1. Configure a custom index-url in pip.ini with credentials, as above
2. Run `pip install --help`
3. Observe the password is not redacted in the help text
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/cli/parser.py`
Content:
```
1 """Base option parser setup"""
2
3 # The following comment should be removed at some point in the future.
4 # mypy: disallow-untyped-defs=False
5
6 from __future__ import absolute_import
7
8 import logging
9 import optparse
10 import sys
11 import textwrap
12 from distutils.util import strtobool
13
14 from pip._vendor.contextlib2 import suppress
15 from pip._vendor.six import string_types
16
17 from pip._internal.cli.status_codes import UNKNOWN_ERROR
18 from pip._internal.configuration import Configuration, ConfigurationError
19 from pip._internal.utils.compat import get_terminal_size
20
21 logger = logging.getLogger(__name__)
22
23
24 class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
25 """A prettier/less verbose help formatter for optparse."""
26
27 def __init__(self, *args, **kwargs):
28 # help position must be aligned with __init__.parseopts.description
29 kwargs['max_help_position'] = 30
30 kwargs['indent_increment'] = 1
31 kwargs['width'] = get_terminal_size()[0] - 2
32 optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
33
34 def format_option_strings(self, option):
35 return self._format_option_strings(option)
36
37 def _format_option_strings(self, option, mvarfmt=' <{}>', optsep=', '):
38 """
39 Return a comma-separated list of option strings and metavars.
40
41 :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
42 :param mvarfmt: metavar format string
43 :param optsep: separator
44 """
45 opts = []
46
47 if option._short_opts:
48 opts.append(option._short_opts[0])
49 if option._long_opts:
50 opts.append(option._long_opts[0])
51 if len(opts) > 1:
52 opts.insert(1, optsep)
53
54 if option.takes_value():
55 metavar = option.metavar or option.dest.lower()
56 opts.append(mvarfmt.format(metavar.lower()))
57
58 return ''.join(opts)
59
60 def format_heading(self, heading):
61 if heading == 'Options':
62 return ''
63 return heading + ':\n'
64
65 def format_usage(self, usage):
66 """
67 Ensure there is only one newline between usage and the first heading
68 if there is no description.
69 """
70 msg = '\nUsage: {}\n'.format(
71 self.indent_lines(textwrap.dedent(usage), " "))
72 return msg
73
74 def format_description(self, description):
75 # leave full control over description to us
76 if description:
77 if hasattr(self.parser, 'main'):
78 label = 'Commands'
79 else:
80 label = 'Description'
81 # some doc strings have initial newlines, some don't
82 description = description.lstrip('\n')
83 # some doc strings have final newlines and spaces, some don't
84 description = description.rstrip()
85 # dedent, then reindent
86 description = self.indent_lines(textwrap.dedent(description), " ")
87 description = '{}:\n{}\n'.format(label, description)
88 return description
89 else:
90 return ''
91
92 def format_epilog(self, epilog):
93 # leave full control over epilog to us
94 if epilog:
95 return epilog
96 else:
97 return ''
98
99 def indent_lines(self, text, indent):
100 new_lines = [indent + line for line in text.split('\n')]
101 return "\n".join(new_lines)
102
103
104 class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
105 """Custom help formatter for use in ConfigOptionParser.
106
107 This is updates the defaults before expanding them, allowing
108 them to show up correctly in the help listing.
109 """
110
111 def expand_default(self, option):
112 if self.parser is not None:
113 self.parser._update_defaults(self.parser.defaults)
114 return optparse.IndentedHelpFormatter.expand_default(self, option)
115
116
117 class CustomOptionParser(optparse.OptionParser):
118
119 def insert_option_group(self, idx, *args, **kwargs):
120 """Insert an OptionGroup at a given position."""
121 group = self.add_option_group(*args, **kwargs)
122
123 self.option_groups.pop()
124 self.option_groups.insert(idx, group)
125
126 return group
127
128 @property
129 def option_list_all(self):
130 """Get a list of all options, including those in option groups."""
131 res = self.option_list[:]
132 for i in self.option_groups:
133 res.extend(i.option_list)
134
135 return res
136
137
138 class ConfigOptionParser(CustomOptionParser):
139 """Custom option parser which updates its defaults by checking the
140 configuration files and environmental variables"""
141
142 def __init__(self, *args, **kwargs):
143 self.name = kwargs.pop('name')
144
145 isolated = kwargs.pop("isolated", False)
146 self.config = Configuration(isolated)
147
148 assert self.name
149 optparse.OptionParser.__init__(self, *args, **kwargs)
150
151 def check_default(self, option, key, val):
152 try:
153 return option.check_value(key, val)
154 except optparse.OptionValueError as exc:
155 print("An error occurred during configuration: {}".format(exc))
156 sys.exit(3)
157
158 def _get_ordered_configuration_items(self):
159 # Configuration gives keys in an unordered manner. Order them.
160 override_order = ["global", self.name, ":env:"]
161
162 # Pool the options into different groups
163 section_items = {name: [] for name in override_order}
164 for section_key, val in self.config.items():
165 # ignore empty values
166 if not val:
167 logger.debug(
168 "Ignoring configuration key '%s' as it's value is empty.",
169 section_key
170 )
171 continue
172
173 section, key = section_key.split(".", 1)
174 if section in override_order:
175 section_items[section].append((key, val))
176
177 # Yield each group in their override order
178 for section in override_order:
179 for key, val in section_items[section]:
180 yield key, val
181
182 def _update_defaults(self, defaults):
183 """Updates the given defaults with values from the config files and
184 the environ. Does a little special handling for certain types of
185 options (lists)."""
186
187 # Accumulate complex default state.
188 self.values = optparse.Values(self.defaults)
189 late_eval = set()
190 # Then set the options with those values
191 for key, val in self._get_ordered_configuration_items():
192 # '--' because configuration supports only long names
193 option = self.get_option('--' + key)
194
195 # Ignore options not present in this parser. E.g. non-globals put
196 # in [global] by users that want them to apply to all applicable
197 # commands.
198 if option is None:
199 continue
200
201 if option.action in ('store_true', 'store_false'):
202 try:
203 val = strtobool(val)
204 except ValueError:
205 self.error(
206 '{} is not a valid value for {} option, ' # noqa
207 'please specify a boolean value like yes/no, '
208 'true/false or 1/0 instead.'.format(val, key)
209 )
210 elif option.action == 'count':
211 with suppress(ValueError):
212 val = strtobool(val)
213 with suppress(ValueError):
214 val = int(val)
215 if not isinstance(val, int) or val < 0:
216 self.error(
217 '{} is not a valid value for {} option, ' # noqa
218 'please instead specify either a non-negative integer '
219 'or a boolean value like yes/no or false/true '
220 'which is equivalent to 1/0.'.format(val, key)
221 )
222 elif option.action == 'append':
223 val = val.split()
224 val = [self.check_default(option, key, v) for v in val]
225 elif option.action == 'callback':
226 late_eval.add(option.dest)
227 opt_str = option.get_opt_string()
228 val = option.convert_value(opt_str, val)
229 # From take_action
230 args = option.callback_args or ()
231 kwargs = option.callback_kwargs or {}
232 option.callback(option, opt_str, val, self, *args, **kwargs)
233 else:
234 val = self.check_default(option, key, val)
235
236 defaults[option.dest] = val
237
238 for key in late_eval:
239 defaults[key] = getattr(self.values, key)
240 self.values = None
241 return defaults
242
243 def get_default_values(self):
244 """Overriding to make updating the defaults after instantiation of
245 the option parser possible, _update_defaults() does the dirty work."""
246 if not self.process_default_values:
247 # Old, pre-Optik 1.5 behaviour.
248 return optparse.Values(self.defaults)
249
250 # Load the configuration, or error out in case of an error
251 try:
252 self.config.load()
253 except ConfigurationError as err:
254 self.exit(UNKNOWN_ERROR, str(err))
255
256 defaults = self._update_defaults(self.defaults.copy()) # ours
257 for option in self._get_all_options():
258 default = defaults.get(option.dest)
259 if isinstance(default, string_types):
260 opt_str = option.get_opt_string()
261 defaults[option.dest] = option.check_value(opt_str, default)
262 return optparse.Values(defaults)
263
264 def error(self, msg):
265 self.print_usage(sys.stderr)
266 self.exit(UNKNOWN_ERROR, "{}\n".format(msg))
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/cli/parser.py b/src/pip/_internal/cli/parser.py
--- a/src/pip/_internal/cli/parser.py
+++ b/src/pip/_internal/cli/parser.py
@@ -17,6 +17,7 @@
from pip._internal.cli.status_codes import UNKNOWN_ERROR
from pip._internal.configuration import Configuration, ConfigurationError
from pip._internal.utils.compat import get_terminal_size
+from pip._internal.utils.misc import redact_auth_from_url
logger = logging.getLogger(__name__)
@@ -106,12 +107,22 @@
This is updates the defaults before expanding them, allowing
them to show up correctly in the help listing.
+
+ Also redact auth from url type options
"""
def expand_default(self, option):
+ default_value = None
if self.parser is not None:
self.parser._update_defaults(self.parser.defaults)
- return optparse.IndentedHelpFormatter.expand_default(self, option)
+ default_value = self.parser.defaults.get(option.dest)
+ help_text = optparse.IndentedHelpFormatter.expand_default(self, option)
+
+ if default_value and option.metavar == 'URL':
+ help_text = help_text.replace(
+ default_value, redact_auth_from_url(default_value))
+
+ return help_text
class CustomOptionParser(optparse.OptionParser):
| {"golden_diff": "diff --git a/src/pip/_internal/cli/parser.py b/src/pip/_internal/cli/parser.py\n--- a/src/pip/_internal/cli/parser.py\n+++ b/src/pip/_internal/cli/parser.py\n@@ -17,6 +17,7 @@\n from pip._internal.cli.status_codes import UNKNOWN_ERROR\n from pip._internal.configuration import Configuration, ConfigurationError\n from pip._internal.utils.compat import get_terminal_size\n+from pip._internal.utils.misc import redact_auth_from_url\n \n logger = logging.getLogger(__name__)\n \n@@ -106,12 +107,22 @@\n \n This is updates the defaults before expanding them, allowing\n them to show up correctly in the help listing.\n+\n+ Also redact auth from url type options\n \"\"\"\n \n def expand_default(self, option):\n+ default_value = None\n if self.parser is not None:\n self.parser._update_defaults(self.parser.defaults)\n- return optparse.IndentedHelpFormatter.expand_default(self, option)\n+ default_value = self.parser.defaults.get(option.dest)\n+ help_text = optparse.IndentedHelpFormatter.expand_default(self, option)\n+\n+ if default_value and option.metavar == 'URL':\n+ help_text = help_text.replace(\n+ default_value, redact_auth_from_url(default_value))\n+\n+ return help_text\n \n \n class CustomOptionParser(optparse.OptionParser):\n", "issue": "--help output does not redact password from configured index_url\n**Environment**\r\n\r\n* pip version: 20.2.4\r\n* Python version: 3.8.6\r\n* OS: Windows 10\r\n\r\n<!-- Feel free to add more information about your environment here -->\r\n\r\n**Description**\r\n\r\nThe help text for at least the `install` and `wheel` subcommands (likely others too) display the full configured index URL as a default, without redacting credentials:\r\n\r\n```\r\nPackage Index Options:\r\n -i, --index-url <url> Base URL of the Python Package Index (default https://us\r\n er:[email protected]/api/pypi/simple). This\r\n should point to a repository compliant with PEP 503 (the\r\n simple repository API) or a local directory laid out in\r\n the same format.\r\n```\r\n\r\nThe URL is coming from pip.ini in `C:\\Users\\cdunklau\\AppData\\Roaming\\pip\\pip.ini`:\r\n\r\n```\r\n[global]\r\nindex-url = https://user:[email protected]/api/pypi/simple\r\n```\r\n\r\n**Expected behavior**\r\n\r\nPip should redact the password, as it does in log entries involving a custom index URL, e.g.:\r\n\r\n```\r\nPackage Index Options:\r\n -i, --index-url <url> Base URL of the Python Package Index (default https://us\r\n er:****@mypypi.example.com/api/pypi/simple). This\r\n should point to a repository compliant with PEP 503 (the\r\n simple repository API) or a local directory laid out in\r\n the same format.\r\n```\r\n\r\n**How to Reproduce**\r\n\r\n1. Configure a custom index-url in pip.ini with credentials, as above\r\n2. Run `pip install --help`\r\n3. 
Observe the password is not redacted in the help text\n", "before_files": [{"content": "\"\"\"Base option parser setup\"\"\"\n\n# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nfrom __future__ import absolute_import\n\nimport logging\nimport optparse\nimport sys\nimport textwrap\nfrom distutils.util import strtobool\n\nfrom pip._vendor.contextlib2 import suppress\nfrom pip._vendor.six import string_types\n\nfrom pip._internal.cli.status_codes import UNKNOWN_ERROR\nfrom pip._internal.configuration import Configuration, ConfigurationError\nfrom pip._internal.utils.compat import get_terminal_size\n\nlogger = logging.getLogger(__name__)\n\n\nclass PrettyHelpFormatter(optparse.IndentedHelpFormatter):\n \"\"\"A prettier/less verbose help formatter for optparse.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # help position must be aligned with __init__.parseopts.description\n kwargs['max_help_position'] = 30\n kwargs['indent_increment'] = 1\n kwargs['width'] = get_terminal_size()[0] - 2\n optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)\n\n def format_option_strings(self, option):\n return self._format_option_strings(option)\n\n def _format_option_strings(self, option, mvarfmt=' <{}>', optsep=', '):\n \"\"\"\n Return a comma-separated list of option strings and metavars.\n\n :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')\n :param mvarfmt: metavar format string\n :param optsep: separator\n \"\"\"\n opts = []\n\n if option._short_opts:\n opts.append(option._short_opts[0])\n if option._long_opts:\n opts.append(option._long_opts[0])\n if len(opts) > 1:\n opts.insert(1, optsep)\n\n if option.takes_value():\n metavar = option.metavar or option.dest.lower()\n opts.append(mvarfmt.format(metavar.lower()))\n\n return ''.join(opts)\n\n def format_heading(self, heading):\n if heading == 'Options':\n return ''\n return heading + ':\\n'\n\n def format_usage(self, usage):\n \"\"\"\n Ensure there is only one newline between usage and the first heading\n if there is no description.\n \"\"\"\n msg = '\\nUsage: {}\\n'.format(\n self.indent_lines(textwrap.dedent(usage), \" \"))\n return msg\n\n def format_description(self, description):\n # leave full control over description to us\n if description:\n if hasattr(self.parser, 'main'):\n label = 'Commands'\n else:\n label = 'Description'\n # some doc strings have initial newlines, some don't\n description = description.lstrip('\\n')\n # some doc strings have final newlines and spaces, some don't\n description = description.rstrip()\n # dedent, then reindent\n description = self.indent_lines(textwrap.dedent(description), \" \")\n description = '{}:\\n{}\\n'.format(label, description)\n return description\n else:\n return ''\n\n def format_epilog(self, epilog):\n # leave full control over epilog to us\n if epilog:\n return epilog\n else:\n return ''\n\n def indent_lines(self, text, indent):\n new_lines = [indent + line for line in text.split('\\n')]\n return \"\\n\".join(new_lines)\n\n\nclass UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):\n \"\"\"Custom help formatter for use in ConfigOptionParser.\n\n This is updates the defaults before expanding them, allowing\n them to show up correctly in the help listing.\n \"\"\"\n\n def expand_default(self, option):\n if self.parser is not None:\n self.parser._update_defaults(self.parser.defaults)\n return optparse.IndentedHelpFormatter.expand_default(self, option)\n\n\nclass CustomOptionParser(optparse.OptionParser):\n\n 
def insert_option_group(self, idx, *args, **kwargs):\n \"\"\"Insert an OptionGroup at a given position.\"\"\"\n group = self.add_option_group(*args, **kwargs)\n\n self.option_groups.pop()\n self.option_groups.insert(idx, group)\n\n return group\n\n @property\n def option_list_all(self):\n \"\"\"Get a list of all options, including those in option groups.\"\"\"\n res = self.option_list[:]\n for i in self.option_groups:\n res.extend(i.option_list)\n\n return res\n\n\nclass ConfigOptionParser(CustomOptionParser):\n \"\"\"Custom option parser which updates its defaults by checking the\n configuration files and environmental variables\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.name = kwargs.pop('name')\n\n isolated = kwargs.pop(\"isolated\", False)\n self.config = Configuration(isolated)\n\n assert self.name\n optparse.OptionParser.__init__(self, *args, **kwargs)\n\n def check_default(self, option, key, val):\n try:\n return option.check_value(key, val)\n except optparse.OptionValueError as exc:\n print(\"An error occurred during configuration: {}\".format(exc))\n sys.exit(3)\n\n def _get_ordered_configuration_items(self):\n # Configuration gives keys in an unordered manner. Order them.\n override_order = [\"global\", self.name, \":env:\"]\n\n # Pool the options into different groups\n section_items = {name: [] for name in override_order}\n for section_key, val in self.config.items():\n # ignore empty values\n if not val:\n logger.debug(\n \"Ignoring configuration key '%s' as it's value is empty.\",\n section_key\n )\n continue\n\n section, key = section_key.split(\".\", 1)\n if section in override_order:\n section_items[section].append((key, val))\n\n # Yield each group in their override order\n for section in override_order:\n for key, val in section_items[section]:\n yield key, val\n\n def _update_defaults(self, defaults):\n \"\"\"Updates the given defaults with values from the config files and\n the environ. Does a little special handling for certain types of\n options (lists).\"\"\"\n\n # Accumulate complex default state.\n self.values = optparse.Values(self.defaults)\n late_eval = set()\n # Then set the options with those values\n for key, val in self._get_ordered_configuration_items():\n # '--' because configuration supports only long names\n option = self.get_option('--' + key)\n\n # Ignore options not present in this parser. E.g. 
non-globals put\n # in [global] by users that want them to apply to all applicable\n # commands.\n if option is None:\n continue\n\n if option.action in ('store_true', 'store_false'):\n try:\n val = strtobool(val)\n except ValueError:\n self.error(\n '{} is not a valid value for {} option, ' # noqa\n 'please specify a boolean value like yes/no, '\n 'true/false or 1/0 instead.'.format(val, key)\n )\n elif option.action == 'count':\n with suppress(ValueError):\n val = strtobool(val)\n with suppress(ValueError):\n val = int(val)\n if not isinstance(val, int) or val < 0:\n self.error(\n '{} is not a valid value for {} option, ' # noqa\n 'please instead specify either a non-negative integer '\n 'or a boolean value like yes/no or false/true '\n 'which is equivalent to 1/0.'.format(val, key)\n )\n elif option.action == 'append':\n val = val.split()\n val = [self.check_default(option, key, v) for v in val]\n elif option.action == 'callback':\n late_eval.add(option.dest)\n opt_str = option.get_opt_string()\n val = option.convert_value(opt_str, val)\n # From take_action\n args = option.callback_args or ()\n kwargs = option.callback_kwargs or {}\n option.callback(option, opt_str, val, self, *args, **kwargs)\n else:\n val = self.check_default(option, key, val)\n\n defaults[option.dest] = val\n\n for key in late_eval:\n defaults[key] = getattr(self.values, key)\n self.values = None\n return defaults\n\n def get_default_values(self):\n \"\"\"Overriding to make updating the defaults after instantiation of\n the option parser possible, _update_defaults() does the dirty work.\"\"\"\n if not self.process_default_values:\n # Old, pre-Optik 1.5 behaviour.\n return optparse.Values(self.defaults)\n\n # Load the configuration, or error out in case of an error\n try:\n self.config.load()\n except ConfigurationError as err:\n self.exit(UNKNOWN_ERROR, str(err))\n\n defaults = self._update_defaults(self.defaults.copy()) # ours\n for option in self._get_all_options():\n default = defaults.get(option.dest)\n if isinstance(default, string_types):\n opt_str = option.get_opt_string()\n defaults[option.dest] = option.check_value(opt_str, default)\n return optparse.Values(defaults)\n\n def error(self, msg):\n self.print_usage(sys.stderr)\n self.exit(UNKNOWN_ERROR, \"{}\\n\".format(msg))\n", "path": "src/pip/_internal/cli/parser.py"}], "after_files": [{"content": "\"\"\"Base option parser setup\"\"\"\n\n# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nfrom __future__ import absolute_import\n\nimport logging\nimport optparse\nimport sys\nimport textwrap\nfrom distutils.util import strtobool\n\nfrom pip._vendor.contextlib2 import suppress\nfrom pip._vendor.six import string_types\n\nfrom pip._internal.cli.status_codes import UNKNOWN_ERROR\nfrom pip._internal.configuration import Configuration, ConfigurationError\nfrom pip._internal.utils.compat import get_terminal_size\nfrom pip._internal.utils.misc import redact_auth_from_url\n\nlogger = logging.getLogger(__name__)\n\n\nclass PrettyHelpFormatter(optparse.IndentedHelpFormatter):\n \"\"\"A prettier/less verbose help formatter for optparse.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # help position must be aligned with __init__.parseopts.description\n kwargs['max_help_position'] = 30\n kwargs['indent_increment'] = 1\n kwargs['width'] = get_terminal_size()[0] - 2\n optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)\n\n def format_option_strings(self, option):\n return 
self._format_option_strings(option)\n\n def _format_option_strings(self, option, mvarfmt=' <{}>', optsep=', '):\n \"\"\"\n Return a comma-separated list of option strings and metavars.\n\n :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')\n :param mvarfmt: metavar format string\n :param optsep: separator\n \"\"\"\n opts = []\n\n if option._short_opts:\n opts.append(option._short_opts[0])\n if option._long_opts:\n opts.append(option._long_opts[0])\n if len(opts) > 1:\n opts.insert(1, optsep)\n\n if option.takes_value():\n metavar = option.metavar or option.dest.lower()\n opts.append(mvarfmt.format(metavar.lower()))\n\n return ''.join(opts)\n\n def format_heading(self, heading):\n if heading == 'Options':\n return ''\n return heading + ':\\n'\n\n def format_usage(self, usage):\n \"\"\"\n Ensure there is only one newline between usage and the first heading\n if there is no description.\n \"\"\"\n msg = '\\nUsage: {}\\n'.format(\n self.indent_lines(textwrap.dedent(usage), \" \"))\n return msg\n\n def format_description(self, description):\n # leave full control over description to us\n if description:\n if hasattr(self.parser, 'main'):\n label = 'Commands'\n else:\n label = 'Description'\n # some doc strings have initial newlines, some don't\n description = description.lstrip('\\n')\n # some doc strings have final newlines and spaces, some don't\n description = description.rstrip()\n # dedent, then reindent\n description = self.indent_lines(textwrap.dedent(description), \" \")\n description = '{}:\\n{}\\n'.format(label, description)\n return description\n else:\n return ''\n\n def format_epilog(self, epilog):\n # leave full control over epilog to us\n if epilog:\n return epilog\n else:\n return ''\n\n def indent_lines(self, text, indent):\n new_lines = [indent + line for line in text.split('\\n')]\n return \"\\n\".join(new_lines)\n\n\nclass UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):\n \"\"\"Custom help formatter for use in ConfigOptionParser.\n\n This is updates the defaults before expanding them, allowing\n them to show up correctly in the help listing.\n\n Also redact auth from url type options\n \"\"\"\n\n def expand_default(self, option):\n default_value = None\n if self.parser is not None:\n self.parser._update_defaults(self.parser.defaults)\n default_value = self.parser.defaults.get(option.dest)\n help_text = optparse.IndentedHelpFormatter.expand_default(self, option)\n\n if default_value and option.metavar == 'URL':\n help_text = help_text.replace(\n default_value, redact_auth_from_url(default_value))\n\n return help_text\n\n\nclass CustomOptionParser(optparse.OptionParser):\n\n def insert_option_group(self, idx, *args, **kwargs):\n \"\"\"Insert an OptionGroup at a given position.\"\"\"\n group = self.add_option_group(*args, **kwargs)\n\n self.option_groups.pop()\n self.option_groups.insert(idx, group)\n\n return group\n\n @property\n def option_list_all(self):\n \"\"\"Get a list of all options, including those in option groups.\"\"\"\n res = self.option_list[:]\n for i in self.option_groups:\n res.extend(i.option_list)\n\n return res\n\n\nclass ConfigOptionParser(CustomOptionParser):\n \"\"\"Custom option parser which updates its defaults by checking the\n configuration files and environmental variables\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.name = kwargs.pop('name')\n\n isolated = kwargs.pop(\"isolated\", False)\n self.config = Configuration(isolated)\n\n assert self.name\n optparse.OptionParser.__init__(self, *args, **kwargs)\n\n def 
check_default(self, option, key, val):\n try:\n return option.check_value(key, val)\n except optparse.OptionValueError as exc:\n print(\"An error occurred during configuration: {}\".format(exc))\n sys.exit(3)\n\n def _get_ordered_configuration_items(self):\n # Configuration gives keys in an unordered manner. Order them.\n override_order = [\"global\", self.name, \":env:\"]\n\n # Pool the options into different groups\n section_items = {name: [] for name in override_order}\n for section_key, val in self.config.items():\n # ignore empty values\n if not val:\n logger.debug(\n \"Ignoring configuration key '%s' as it's value is empty.\",\n section_key\n )\n continue\n\n section, key = section_key.split(\".\", 1)\n if section in override_order:\n section_items[section].append((key, val))\n\n # Yield each group in their override order\n for section in override_order:\n for key, val in section_items[section]:\n yield key, val\n\n def _update_defaults(self, defaults):\n \"\"\"Updates the given defaults with values from the config files and\n the environ. Does a little special handling for certain types of\n options (lists).\"\"\"\n\n # Accumulate complex default state.\n self.values = optparse.Values(self.defaults)\n late_eval = set()\n # Then set the options with those values\n for key, val in self._get_ordered_configuration_items():\n # '--' because configuration supports only long names\n option = self.get_option('--' + key)\n\n # Ignore options not present in this parser. E.g. non-globals put\n # in [global] by users that want them to apply to all applicable\n # commands.\n if option is None:\n continue\n\n if option.action in ('store_true', 'store_false'):\n try:\n val = strtobool(val)\n except ValueError:\n self.error(\n '{} is not a valid value for {} option, ' # noqa\n 'please specify a boolean value like yes/no, '\n 'true/false or 1/0 instead.'.format(val, key)\n )\n elif option.action == 'count':\n with suppress(ValueError):\n val = strtobool(val)\n with suppress(ValueError):\n val = int(val)\n if not isinstance(val, int) or val < 0:\n self.error(\n '{} is not a valid value for {} option, ' # noqa\n 'please instead specify either a non-negative integer '\n 'or a boolean value like yes/no or false/true '\n 'which is equivalent to 1/0.'.format(val, key)\n )\n elif option.action == 'append':\n val = val.split()\n val = [self.check_default(option, key, v) for v in val]\n elif option.action == 'callback':\n late_eval.add(option.dest)\n opt_str = option.get_opt_string()\n val = option.convert_value(opt_str, val)\n # From take_action\n args = option.callback_args or ()\n kwargs = option.callback_kwargs or {}\n option.callback(option, opt_str, val, self, *args, **kwargs)\n else:\n val = self.check_default(option, key, val)\n\n defaults[option.dest] = val\n\n for key in late_eval:\n defaults[key] = getattr(self.values, key)\n self.values = None\n return defaults\n\n def get_default_values(self):\n \"\"\"Overriding to make updating the defaults after instantiation of\n the option parser possible, _update_defaults() does the dirty work.\"\"\"\n if not self.process_default_values:\n # Old, pre-Optik 1.5 behaviour.\n return optparse.Values(self.defaults)\n\n # Load the configuration, or error out in case of an error\n try:\n self.config.load()\n except ConfigurationError as err:\n self.exit(UNKNOWN_ERROR, str(err))\n\n defaults = self._update_defaults(self.defaults.copy()) # ours\n for option in self._get_all_options():\n default = defaults.get(option.dest)\n if isinstance(default, string_types):\n 
opt_str = option.get_opt_string()\n defaults[option.dest] = option.check_value(opt_str, default)\n return optparse.Values(defaults)\n\n def error(self, msg):\n self.print_usage(sys.stderr)\n self.exit(UNKNOWN_ERROR, \"{}\\n\".format(msg))\n", "path": "src/pip/_internal/cli/parser.py"}]} | 3,365 | 296 |
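Editor's note on the pip record above: its golden diff redacts credentials from URL-type defaults inside `UpdatingDefaultsHelpFormatter.expand_default` by calling `redact_auth_from_url` whenever an option's metavar is `URL`. As a rough standalone sketch of that redaction behaviour — the helper below is an assumption for illustration only, not pip's actual `pip._internal.utils.misc.redact_auth_from_url` — the expected `user:****@host` form from the issue can be produced like this:

```python
# Standalone sketch of URL credential redaction (assumed helper, not pip's own).
from urllib.parse import urlsplit, urlunsplit


def redact_auth_from_url(url: str) -> str:
    parts = urlsplit(url)
    if parts.password is None:
        return url  # no embedded password, nothing to redact
    host = parts.hostname or ""
    if parts.port is not None:
        host = "{}:{}".format(host, parts.port)
    netloc = "{}:****@{}".format(parts.username, host)
    return urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))


print(redact_auth_from_url("https://user:[email protected]/api/pypi/simple"))
# -> https://user:****@mypypi.example.com/api/pypi/simple
```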
gh_patches_debug_32435 | rasdani/github-patches | git_diff | facebookresearch__fairscale-55 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
oss_ddp will assert in forward when running in eval mode
## 🐛 Bug
When running in eval mode, only the forward pass is executed, which causes the need_reduction check in forward to fail. This can be fixed by setting need_reduction in a backward hook instead of in forward.
## Command
## To Reproduce
Steps to reproduce the behavior:
<!-- If you were running a command, post the exact command that you were running -->
1.
2.
3.
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
## Environment
Please copy and paste the output from the
environment collection script from PyTorch
(or fill out the checklist below manually).
You can run the script with:
```
# For security purposes, please check the contents of collect_env.py before running it.
python -m torch.utils.collect_env
```
- PyTorch Version (e.g., 1.0):
- OS (e.g., Linux):
- How you installed PyTorch (`conda`, `pip`, source):
- Build command you used (if compiling from source):
- Python version:
- CUDA/cuDNN version:
- GPU models and configuration:
- Any other relevant information:
## Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
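A minimal sketch of the backward-hook alternative suggested in the issue above: register a gradient hook per parameter so that `need_reduction` is only flipped when a backward pass actually produces gradients, and eval-mode forwards never trip the assertion. This is an illustrative assumption about how such hooks could be wired up from `OssDdp.__init__` (using the attributes defined in the module quoted below), not the change that was merged — the accepted patch at the end of this record instead gates the check on `self.module.training`:

```python
# Sketch only: per-parameter gradient hooks that mark reduction as pending.
# Meant to be added as a method on OssDdp and called once from __init__.
def _register_grad_hooks(self) -> None:
    def _mark_pending(_grad):
        # Autograd calls this whenever a gradient for the parameter is computed,
        # i.e. only during backward, never during an eval-mode forward.
        if not self.accumulate_grads:
            self.need_reduction = True
        return None  # leave the gradient itself unchanged

    for param in self.module.parameters():
        if param.requires_grad:
            param.register_hook(_mark_pending)
```

With hooks like these, `forward()` would no longer set the flag itself; the merged fix avoids the extra bookkeeping by only performing the check and the flag update while `self.module.training` is true.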
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fairscale/nn/data_parallel/oss_ddp.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 """
7 A distributed data parallel class that works with OSS optimizer.
8
9 Adopted from LegacyDistributedDataParallel module from fairseq.
10 """
11
12 from collections import OrderedDict
13 from contextlib import contextmanager
14 import copy
15 from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, cast
16
17 import torch
18 from torch import nn
19 import torch.distributed as dist
20
21 if TYPE_CHECKING:
22 from fairscale.optim import OSS
23 from torch import Tensor
24 from torch.nn import Parameter
25 else:
26 OSS = Any
27 Tensor = Any
28 Parameter = Any
29
30
31 class OssDdp(nn.Module):
32 """Implements distributed data parallel training with optimizer state sharding.
33
34 A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
35 This version uses a c10d process group for communication and does not
36 broadcast buffers.
37
38 Args:
39 module (~torch.nn.Module): module to be parallelized
40 oss (fairscale.optim.OSS): shared state optimizer
41 world_size (int): number of parallel workers
42 process_group (optional): the c10d process group to be used for
43 distributed gradient reduction. If None, the default WORLD process group
44 will be used.
45 buffer_size (int, optional): number of elements to buffer before
46 performing reduce (default: 256M). Used to reduce multiple small
47 params to avoid communication overhead.
48 """
49
50 def __init__(
51 self, module: nn.Module, oss: OSS, world_size: int, process_group: Any = None, buffer_size: int = 2 ** 28
52 ):
53 super().__init__()
54
55 self.module = module
56 self.world_size = world_size
57 self.process_group = process_group if process_group is not None else dist.group.WORLD
58 self.rank = dist.get_rank(self.process_group)
59
60 # Never use a bigger buffer than the number of model params
61 self.buffer_size = min(buffer_size, sum(p.numel() for p in self.module.parameters()))
62 self.buffer: Optional[Tensor] = None
63
64 # Flag used to make sure we only reduce gradients one time in the execution engine
65 self.need_reduction = False
66
67 # We can also forcibly accumulate grads locally and only do the
68 # gradients-reduce at some later time
69 self.accumulate_grads = False
70
71 # TODO (Min): The algorithm here can be improved. We are sorting params by device
72 # and by rank. Then in reduction_fn below, we pack smaller ones into
73 # a buffer for reduction.
74 # We can pre-sort them here and simplify the reduction_fn logic below
75 # since their size shouldn't change.
76
77 # make per-device lists of parameters
78 paramlists: OrderedDict = OrderedDict()
79 for param in self.module.parameters():
80 device = param.device
81 if paramlists.get(device) is None:
82 paramlists[device] = []
83 paramlists[device] += [param]
84 self.per_device_params = list(paramlists.values())
85
86 # query oss and build a param-to-rank table
87 self.param_rank = {}
88 for rank, param_groups in enumerate(oss.partition_parameters()):
89 for param_group in param_groups:
90 for param in param_group["params"]:
91 self.param_rank[param] = rank
92
93 # sanity checks
94 assert len(self.param_rank) == len(list(self.module.parameters())), "number of params do not match"
95 for param in self.module.parameters():
96 assert param in self.param_rank, f"{param} not in the optimizer"
97
98 def __getstate__(self) -> Dict:
99 attrs = copy.copy(self.__dict__)
100 return attrs
101
102 @contextmanager
103 def no_sync(self) -> Generator:
104 """A context manager to disable gradient synchronization."""
105 old_accumulate_grads = self.accumulate_grads
106 self.accumulate_grads = True
107 yield
108 self.accumulate_grads = old_accumulate_grads
109
110 def forward(self, *inputs: Any, **kwargs: Any) -> Tensor:
111 if self.need_reduction:
112 raise RuntimeError("OssDdp requires explicit reduction, must call OssDdp.reduce")
113 if not self.accumulate_grads:
114 self.need_reduction = True
115 return self.module(*inputs, **kwargs)
116
117 def reduce(self) -> None:
118 """
119 This function must be called explicitly after backward to reduce
120 gradients. There is no automatic hook like c10d.
121 """
122
123 def reduce_params(params: List[Parameter], params_rank: int) -> None:
124 """ Helper to reduce a list of params that should fix in the buffer. """
125 assert self.buffer is not None
126 buffer: Tensor = cast(Tensor, self.buffer)
127 nonzero_buffer = False
128 if len(params) > 1:
129 offset = 0
130 for p in params:
131 sz = p.numel()
132 if p.grad is not None:
133 # The type error could have been fixed in later
134 # version of pytorch. Same elsewhere.
135 buffer[offset : offset + sz].copy_(p.grad.data.view(-1)) # type: ignore
136 nonzero_buffer = True
137 else:
138 buffer[offset : offset + sz].zero_()
139 offset += sz
140 else:
141 # we only have a single grad to reduce
142 p = params[0]
143 if p.grad is not None:
144 buffer = p.grad.data
145 nonzero_buffer = True
146 elif p.numel() <= self.buffer.numel():
147 buffer = buffer[: p.numel()]
148 buffer.zero_()
149 else:
150 buffer = torch.zeros_like(p)
151
152 if nonzero_buffer:
153 buffer.div_(self.world_size) # type: ignore
154
155 dist.reduce(buffer, params_rank, group=self.process_group) # type: ignore
156
157 if params_rank == self.rank:
158 # copy reduced grads back into their original place
159 offset = 0
160 for p in params:
161 sz = p.numel()
162 if p.grad is not None:
163 p.grad.data.copy_(buffer[offset : offset + sz].view_as(p)) # type: ignore
164 else:
165 p.grad = buffer[offset : offset + sz].view_as(p).clone()
166 offset += sz
167 else:
168 # zero the grads
169 for p in params:
170 if p.grad is not None:
171 p.grad.data.zero_()
172
173 def reduction_fn() -> None:
174 # This function only needs to be called once
175 if not self.need_reduction or self.accumulate_grads:
176 return
177 self.need_reduction = False
178
179 if self.buffer is None:
180 self.buffer = next(self.module.parameters()).new(self.buffer_size) # type: ignore
181
182 for params in self.per_device_params:
183 # Reduce the gradients in buckets
184 offset = 0
185 buffered_params: List[Parameter] = []
186 param_rank: Optional[int] = None
187 for param in params:
188 last_param_rank: Optional[int] = param_rank
189 param_rank = self.param_rank[param]
190 if not param.requires_grad:
191 continue
192 if param.grad is None:
193 param.grad = torch.zeros_like(param)
194 if param.grad.requires_grad:
195 raise RuntimeError("DistributedDataParallel only works with gradients that don't require grad")
196 sz = param.numel()
197 if sz > self.buffer.numel():
198 # reduce big params directly
199 assert param_rank is not None
200 reduce_params([param], cast(int, param_rank))
201 else:
202 # smaller params are packed together from the same device
203 # and same rank.
204 if offset + sz > self.buffer.numel() or (
205 last_param_rank is not None and last_param_rank != param_rank
206 ):
207 assert last_param_rank is not None
208 reduce_params(buffered_params, cast(int, last_param_rank))
209 offset = 0
210 buffered_params.clear()
211 buffered_params.append(param)
212 offset += sz
213
214 if len(buffered_params) > 0:
215 assert param_rank is not None
216 reduce_params(buffered_params, cast(int, param_rank))
217
218 reduction_fn()
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fairscale/nn/data_parallel/oss_ddp.py b/fairscale/nn/data_parallel/oss_ddp.py
--- a/fairscale/nn/data_parallel/oss_ddp.py
+++ b/fairscale/nn/data_parallel/oss_ddp.py
@@ -99,6 +99,15 @@
attrs = copy.copy(self.__dict__)
return attrs
+ def train(self, mode: bool = True) -> "OssDdp":
+ pre_mode = self.module.training
+ self.module.train(mode)
+ if self.module.training:
+ assert not self.need_reduction or pre_mode, "incorrect state transition"
+ else:
+ assert not self.need_reduction, "try to enter eval with grads unreduced"
+ return self
+
@contextmanager
def no_sync(self) -> Generator:
"""A context manager to disable gradient synchronization."""
@@ -108,10 +117,11 @@
self.accumulate_grads = old_accumulate_grads
def forward(self, *inputs: Any, **kwargs: Any) -> Tensor:
- if self.need_reduction:
- raise RuntimeError("OssDdp requires explicit reduction, must call OssDdp.reduce")
- if not self.accumulate_grads:
- self.need_reduction = True
+ if self.module.training:
+ if self.need_reduction:
+ raise RuntimeError("OssDdp requires explicit reduction, must call OssDdp.reduce")
+ if not self.accumulate_grads:
+ self.need_reduction = True
return self.module(*inputs, **kwargs)
def reduce(self) -> None:
@@ -119,6 +129,7 @@
This function must be called explicitly after backward to reduce
gradients. There is no automatic hook like c10d.
"""
+ assert self.module.training, "Cannot call reduce in eval"
def reduce_params(params: List[Parameter], params_rank: int) -> None:
""" Helper to reduce a list of params that should fix in the buffer. """
| {"golden_diff": "diff --git a/fairscale/nn/data_parallel/oss_ddp.py b/fairscale/nn/data_parallel/oss_ddp.py\n--- a/fairscale/nn/data_parallel/oss_ddp.py\n+++ b/fairscale/nn/data_parallel/oss_ddp.py\n@@ -99,6 +99,15 @@\n attrs = copy.copy(self.__dict__)\n return attrs\n \n+ def train(self, mode: bool = True) -> \"OssDdp\":\n+ pre_mode = self.module.training\n+ self.module.train(mode)\n+ if self.module.training:\n+ assert not self.need_reduction or pre_mode, \"incorrect state transition\"\n+ else:\n+ assert not self.need_reduction, \"try to enter eval with grads unreduced\"\n+ return self\n+\n @contextmanager\n def no_sync(self) -> Generator:\n \"\"\"A context manager to disable gradient synchronization.\"\"\"\n@@ -108,10 +117,11 @@\n self.accumulate_grads = old_accumulate_grads\n \n def forward(self, *inputs: Any, **kwargs: Any) -> Tensor:\n- if self.need_reduction:\n- raise RuntimeError(\"OssDdp requires explicit reduction, must call OssDdp.reduce\")\n- if not self.accumulate_grads:\n- self.need_reduction = True\n+ if self.module.training:\n+ if self.need_reduction:\n+ raise RuntimeError(\"OssDdp requires explicit reduction, must call OssDdp.reduce\")\n+ if not self.accumulate_grads:\n+ self.need_reduction = True\n return self.module(*inputs, **kwargs)\n \n def reduce(self) -> None:\n@@ -119,6 +129,7 @@\n This function must be called explicitly after backward to reduce\n gradients. There is no automatic hook like c10d.\n \"\"\"\n+ assert self.module.training, \"Cannot call reduce in eval\"\n \n def reduce_params(params: List[Parameter], params_rank: int) -> None:\n \"\"\" Helper to reduce a list of params that should fix in the buffer. \"\"\"\n", "issue": "oss_ddp will assert in forward when running in eval mode\n## \ud83d\udc1b Bug\r\n\r\nIn training, we only forward which will cause the need_reduction check to fail. This can be fixed setting need_reduction in a backward hook instead of forward.\r\n\r\n## Command\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n<!-- If you were running a command, post the exact command that you were running -->\r\n\r\n1.\r\n2.\r\n3.\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n## Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## Environment\r\n\r\nPlease copy and paste the output from the\r\nenvironment collection script from PyTorch\r\n(or fill out the checklist below manually).\r\n\r\nYou can run the script with:\r\n```\r\n# For security purposes, please check the contents of collect_env.py before running it.\r\npython -m torch.utils.collect_env\r\n```\r\n\r\n - PyTorch Version (e.g., 1.0):\r\n - OS (e.g., Linux):\r\n - How you installed PyTorch (`conda`, `pip`, source):\r\n - Build command you used (if compiling from source):\r\n - Python version:\r\n - CUDA/cuDNN version:\r\n - GPU models and configuration:\r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nA distributed data parallel class that works with OSS optimizer.\n\nAdopted from LegacyDistributedDataParallel module from fairseq.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nimport copy\nfrom typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, cast\n\nimport torch\nfrom torch import nn\nimport torch.distributed as dist\n\nif TYPE_CHECKING:\n from fairscale.optim import OSS\n from torch import Tensor\n from torch.nn import Parameter\nelse:\n OSS = Any\n Tensor = Any\n Parameter = Any\n\n\nclass OssDdp(nn.Module):\n \"\"\"Implements distributed data parallel training with optimizer state sharding.\n\n A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.\n This version uses a c10d process group for communication and does not\n broadcast buffers.\n\n Args:\n module (~torch.nn.Module): module to be parallelized\n oss (fairscale.optim.OSS): shared state optimizer\n world_size (int): number of parallel workers\n process_group (optional): the c10d process group to be used for\n distributed gradient reduction. If None, the default WORLD process group\n will be used.\n buffer_size (int, optional): number of elements to buffer before\n performing reduce (default: 256M). Used to reduce multiple small\n params to avoid communication overhead.\n \"\"\"\n\n def __init__(\n self, module: nn.Module, oss: OSS, world_size: int, process_group: Any = None, buffer_size: int = 2 ** 28\n ):\n super().__init__()\n\n self.module = module\n self.world_size = world_size\n self.process_group = process_group if process_group is not None else dist.group.WORLD\n self.rank = dist.get_rank(self.process_group)\n\n # Never use a bigger buffer than the number of model params\n self.buffer_size = min(buffer_size, sum(p.numel() for p in self.module.parameters()))\n self.buffer: Optional[Tensor] = None\n\n # Flag used to make sure we only reduce gradients one time in the execution engine\n self.need_reduction = False\n\n # We can also forcibly accumulate grads locally and only do the\n # gradients-reduce at some later time\n self.accumulate_grads = False\n\n # TODO (Min): The algorithm here can be improved. We are sorting params by device\n # and by rank. 
Then in reduction_fn below, we pack smaller ones into\n # a buffer for reduction.\n # We can pre-sort them here and simplify the reduction_fn logic below\n # since their size shouldn't change.\n\n # make per-device lists of parameters\n paramlists: OrderedDict = OrderedDict()\n for param in self.module.parameters():\n device = param.device\n if paramlists.get(device) is None:\n paramlists[device] = []\n paramlists[device] += [param]\n self.per_device_params = list(paramlists.values())\n\n # query oss and build a param-to-rank table\n self.param_rank = {}\n for rank, param_groups in enumerate(oss.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n self.param_rank[param] = rank\n\n # sanity checks\n assert len(self.param_rank) == len(list(self.module.parameters())), \"number of params do not match\"\n for param in self.module.parameters():\n assert param in self.param_rank, f\"{param} not in the optimizer\"\n\n def __getstate__(self) -> Dict:\n attrs = copy.copy(self.__dict__)\n return attrs\n\n @contextmanager\n def no_sync(self) -> Generator:\n \"\"\"A context manager to disable gradient synchronization.\"\"\"\n old_accumulate_grads = self.accumulate_grads\n self.accumulate_grads = True\n yield\n self.accumulate_grads = old_accumulate_grads\n\n def forward(self, *inputs: Any, **kwargs: Any) -> Tensor:\n if self.need_reduction:\n raise RuntimeError(\"OssDdp requires explicit reduction, must call OssDdp.reduce\")\n if not self.accumulate_grads:\n self.need_reduction = True\n return self.module(*inputs, **kwargs)\n\n def reduce(self) -> None:\n \"\"\"\n This function must be called explicitly after backward to reduce\n gradients. There is no automatic hook like c10d.\n \"\"\"\n\n def reduce_params(params: List[Parameter], params_rank: int) -> None:\n \"\"\" Helper to reduce a list of params that should fix in the buffer. \"\"\"\n assert self.buffer is not None\n buffer: Tensor = cast(Tensor, self.buffer)\n nonzero_buffer = False\n if len(params) > 1:\n offset = 0\n for p in params:\n sz = p.numel()\n if p.grad is not None:\n # The type error could have been fixed in later\n # version of pytorch. 
Same elsewhere.\n buffer[offset : offset + sz].copy_(p.grad.data.view(-1)) # type: ignore\n nonzero_buffer = True\n else:\n buffer[offset : offset + sz].zero_()\n offset += sz\n else:\n # we only have a single grad to reduce\n p = params[0]\n if p.grad is not None:\n buffer = p.grad.data\n nonzero_buffer = True\n elif p.numel() <= self.buffer.numel():\n buffer = buffer[: p.numel()]\n buffer.zero_()\n else:\n buffer = torch.zeros_like(p)\n\n if nonzero_buffer:\n buffer.div_(self.world_size) # type: ignore\n\n dist.reduce(buffer, params_rank, group=self.process_group) # type: ignore\n\n if params_rank == self.rank:\n # copy reduced grads back into their original place\n offset = 0\n for p in params:\n sz = p.numel()\n if p.grad is not None:\n p.grad.data.copy_(buffer[offset : offset + sz].view_as(p)) # type: ignore\n else:\n p.grad = buffer[offset : offset + sz].view_as(p).clone()\n offset += sz\n else:\n # zero the grads\n for p in params:\n if p.grad is not None:\n p.grad.data.zero_()\n\n def reduction_fn() -> None:\n # This function only needs to be called once\n if not self.need_reduction or self.accumulate_grads:\n return\n self.need_reduction = False\n\n if self.buffer is None:\n self.buffer = next(self.module.parameters()).new(self.buffer_size) # type: ignore\n\n for params in self.per_device_params:\n # Reduce the gradients in buckets\n offset = 0\n buffered_params: List[Parameter] = []\n param_rank: Optional[int] = None\n for param in params:\n last_param_rank: Optional[int] = param_rank\n param_rank = self.param_rank[param]\n if not param.requires_grad:\n continue\n if param.grad is None:\n param.grad = torch.zeros_like(param)\n if param.grad.requires_grad:\n raise RuntimeError(\"DistributedDataParallel only works with gradients that don't require grad\")\n sz = param.numel()\n if sz > self.buffer.numel():\n # reduce big params directly\n assert param_rank is not None\n reduce_params([param], cast(int, param_rank))\n else:\n # smaller params are packed together from the same device\n # and same rank.\n if offset + sz > self.buffer.numel() or (\n last_param_rank is not None and last_param_rank != param_rank\n ):\n assert last_param_rank is not None\n reduce_params(buffered_params, cast(int, last_param_rank))\n offset = 0\n buffered_params.clear()\n buffered_params.append(param)\n offset += sz\n\n if len(buffered_params) > 0:\n assert param_rank is not None\n reduce_params(buffered_params, cast(int, param_rank))\n\n reduction_fn()\n", "path": "fairscale/nn/data_parallel/oss_ddp.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nA distributed data parallel class that works with OSS optimizer.\n\nAdopted from LegacyDistributedDataParallel module from fairseq.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nimport copy\nfrom typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, cast\n\nimport torch\nfrom torch import nn\nimport torch.distributed as dist\n\nif TYPE_CHECKING:\n from fairscale.optim import OSS\n from torch import Tensor\n from torch.nn import Parameter\nelse:\n OSS = Any\n Tensor = Any\n Parameter = Any\n\n\nclass OssDdp(nn.Module):\n \"\"\"Implements distributed data parallel training with optimizer state sharding.\n\n A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.\n This version uses a c10d process group for communication and does not\n broadcast buffers.\n\n Args:\n module (~torch.nn.Module): module to be parallelized\n oss (fairscale.optim.OSS): shared state optimizer\n world_size (int): number of parallel workers\n process_group (optional): the c10d process group to be used for\n distributed gradient reduction. If None, the default WORLD process group\n will be used.\n buffer_size (int, optional): number of elements to buffer before\n performing reduce (default: 256M). Used to reduce multiple small\n params to avoid communication overhead.\n \"\"\"\n\n def __init__(\n self, module: nn.Module, oss: OSS, world_size: int, process_group: Any = None, buffer_size: int = 2 ** 28\n ):\n super().__init__()\n\n self.module = module\n self.world_size = world_size\n self.process_group = process_group if process_group is not None else dist.group.WORLD\n self.rank = dist.get_rank(self.process_group)\n\n # Never use a bigger buffer than the number of model params\n self.buffer_size = min(buffer_size, sum(p.numel() for p in self.module.parameters()))\n self.buffer: Optional[Tensor] = None\n\n # Flag used to make sure we only reduce gradients one time in the execution engine\n self.need_reduction = False\n\n # We can also forcibly accumulate grads locally and only do the\n # gradients-reduce at some later time\n self.accumulate_grads = False\n\n # TODO (Min): The algorithm here can be improved. We are sorting params by device\n # and by rank. 
Then in reduction_fn below, we pack smaller ones into\n # a buffer for reduction.\n # We can pre-sort them here and simplify the reduction_fn logic below\n # since their size shouldn't change.\n\n # make per-device lists of parameters\n paramlists: OrderedDict = OrderedDict()\n for param in self.module.parameters():\n device = param.device\n if paramlists.get(device) is None:\n paramlists[device] = []\n paramlists[device] += [param]\n self.per_device_params = list(paramlists.values())\n\n # query oss and build a param-to-rank table\n self.param_rank = {}\n for rank, param_groups in enumerate(oss.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n self.param_rank[param] = rank\n\n # sanity checks\n assert len(self.param_rank) == len(list(self.module.parameters())), \"number of params do not match\"\n for param in self.module.parameters():\n assert param in self.param_rank, f\"{param} not in the optimizer\"\n\n def __getstate__(self) -> Dict:\n attrs = copy.copy(self.__dict__)\n return attrs\n\n def train(self, mode: bool = True) -> \"OssDdp\":\n pre_mode = self.module.training\n self.module.train(mode)\n if self.module.training:\n assert not self.need_reduction or pre_mode, \"incorrect state transition\"\n else:\n assert not self.need_reduction, \"try to enter eval with grads unreduced\"\n return self\n\n @contextmanager\n def no_sync(self) -> Generator:\n \"\"\"A context manager to disable gradient synchronization.\"\"\"\n old_accumulate_grads = self.accumulate_grads\n self.accumulate_grads = True\n yield\n self.accumulate_grads = old_accumulate_grads\n\n def forward(self, *inputs: Any, **kwargs: Any) -> Tensor:\n if self.module.training:\n if self.need_reduction:\n raise RuntimeError(\"OssDdp requires explicit reduction, must call OssDdp.reduce\")\n if not self.accumulate_grads:\n self.need_reduction = True\n return self.module(*inputs, **kwargs)\n\n def reduce(self) -> None:\n \"\"\"\n This function must be called explicitly after backward to reduce\n gradients. There is no automatic hook like c10d.\n \"\"\"\n assert self.module.training, \"Cannot call reduce in eval\"\n\n def reduce_params(params: List[Parameter], params_rank: int) -> None:\n \"\"\" Helper to reduce a list of params that should fix in the buffer. \"\"\"\n assert self.buffer is not None\n buffer: Tensor = cast(Tensor, self.buffer)\n nonzero_buffer = False\n if len(params) > 1:\n offset = 0\n for p in params:\n sz = p.numel()\n if p.grad is not None:\n # The type error could have been fixed in later\n # version of pytorch. 
Same elsewhere.\n buffer[offset : offset + sz].copy_(p.grad.data.view(-1)) # type: ignore\n nonzero_buffer = True\n else:\n buffer[offset : offset + sz].zero_()\n offset += sz\n else:\n # we only have a single grad to reduce\n p = params[0]\n if p.grad is not None:\n buffer = p.grad.data\n nonzero_buffer = True\n elif p.numel() <= self.buffer.numel():\n buffer = buffer[: p.numel()]\n buffer.zero_()\n else:\n buffer = torch.zeros_like(p)\n\n if nonzero_buffer:\n buffer.div_(self.world_size) # type: ignore\n\n dist.reduce(buffer, params_rank, group=self.process_group) # type: ignore\n\n if params_rank == self.rank:\n # copy reduced grads back into their original place\n offset = 0\n for p in params:\n sz = p.numel()\n if p.grad is not None:\n p.grad.data.copy_(buffer[offset : offset + sz].view_as(p)) # type: ignore\n else:\n p.grad = buffer[offset : offset + sz].view_as(p).clone()\n offset += sz\n else:\n # zero the grads\n for p in params:\n if p.grad is not None:\n p.grad.data.zero_()\n\n def reduction_fn() -> None:\n # This function only needs to be called once\n if not self.need_reduction or self.accumulate_grads:\n return\n self.need_reduction = False\n\n if self.buffer is None:\n self.buffer = next(self.module.parameters()).new(self.buffer_size) # type: ignore\n\n for params in self.per_device_params:\n # Reduce the gradients in buckets\n offset = 0\n buffered_params: List[Parameter] = []\n param_rank: Optional[int] = None\n for param in params:\n last_param_rank: Optional[int] = param_rank\n param_rank = self.param_rank[param]\n if not param.requires_grad:\n continue\n if param.grad is None:\n param.grad = torch.zeros_like(param)\n if param.grad.requires_grad:\n raise RuntimeError(\"DistributedDataParallel only works with gradients that don't require grad\")\n sz = param.numel()\n if sz > self.buffer.numel():\n # reduce big params directly\n assert param_rank is not None\n reduce_params([param], cast(int, param_rank))\n else:\n # smaller params are packed together from the same device\n # and same rank.\n if offset + sz > self.buffer.numel() or (\n last_param_rank is not None and last_param_rank != param_rank\n ):\n assert last_param_rank is not None\n reduce_params(buffered_params, cast(int, last_param_rank))\n offset = 0\n buffered_params.clear()\n buffered_params.append(param)\n offset += sz\n\n if len(buffered_params) > 0:\n assert param_rank is not None\n reduce_params(buffered_params, cast(int, param_rank))\n\n reduction_fn()\n", "path": "fairscale/nn/data_parallel/oss_ddp.py"}]} | 2,882 | 459 |
gh_patches_debug_17812 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-745 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
can't find icons with setuptools `sudo python3 setup.py install`
With setuptools, `python3 setup.py install` places data files in a different location relative to the installed package, which means that the icon directory is not found.
--- END ISSUE ---
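To make the path mismatch concrete: `icons.py` (quoted below) derives its search locations from `__file__`, e.g. `repo_share` three directory levels above `solaar/ui`, but a setuptools `setup.py install` can place the package inside an `.egg` directory whose `share/` sits only two levels up, so none of the existing candidates match. The install path below is purely hypothetical and only illustrates the arithmetic; the accepted patch later in this record adds exactly this two-levels-up candidate as `setuptools_share`:

```python
# Hypothetical .egg install layout -- the concrete path is an assumption used
# only to show where each candidate resolves.
import os.path as path

module_dir = "/usr/lib/python3.8/site-packages/solaar-1.0.2-py3.8.egg/solaar/ui"

repo_share = path.normpath(path.join(module_dir, "..", "..", "..", "share"))
# -> '/usr/lib/python3.8/site-packages/share'  (does not exist in this layout)

setuptools_share = path.normpath(path.join(module_dir, "..", "..", "share"))
# -> '/usr/lib/python3.8/site-packages/solaar-1.0.2-py3.8.egg/share'
#    (where `setup.py install` puts the data files, so share/solaar/icons exists)
```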
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/solaar/ui/icons.py`
Content:
```
1 # -*- python-mode -*-
2 # -*- coding: UTF-8 -*-
3
4 ## Copyright (C) 2012-2013 Daniel Pavel
5 ##
6 ## This program is free software; you can redistribute it and/or modify
7 ## it under the terms of the GNU General Public License as published by
8 ## the Free Software Foundation; either version 2 of the License, or
9 ## (at your option) any later version.
10 ##
11 ## This program is distributed in the hope that it will be useful,
12 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ## GNU General Public License for more details.
15 ##
16 ## You should have received a copy of the GNU General Public License along
17 ## with this program; if not, write to the Free Software Foundation, Inc.,
18 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19
20 from __future__ import absolute_import, division, print_function, unicode_literals
21
22 from logging import getLogger, DEBUG as _DEBUG
23 _log = getLogger(__name__)
24 del getLogger
25
26 from gi.repository import Gtk
27
28 #
29 #
30 #
31
32 _LARGE_SIZE = 64
33 Gtk.IconSize.LARGE = Gtk.icon_size_register('large', _LARGE_SIZE, _LARGE_SIZE)
34 # Gtk.IconSize.XLARGE = Gtk.icon_size_register('x-large', _LARGE_SIZE * 2, _LARGE_SIZE * 2)
35 # print ("menu", int(Gtk.IconSize.MENU), Gtk.icon_size_lookup(Gtk.IconSize.MENU))
36 # print ("small toolbar", int(Gtk.IconSize.SMALL_TOOLBAR), Gtk.icon_size_lookup(Gtk.IconSize.SMALL_TOOLBAR))
37 # print ("button", int(Gtk.IconSize.BUTTON), Gtk.icon_size_lookup(Gtk.IconSize.BUTTON))
38 # print ("large toolbar", int(Gtk.IconSize.LARGE_TOOLBAR), Gtk.icon_size_lookup(Gtk.IconSize.LARGE_TOOLBAR))
39 # print ("dnd", int(Gtk.IconSize.DND), Gtk.icon_size_lookup(Gtk.IconSize.DND))
40 # print ("dialog", int(Gtk.IconSize.DIALOG), Gtk.icon_size_lookup(Gtk.IconSize.DIALOG))
41
42 TRAY_INIT = 'solaar-init'
43 TRAY_OKAY = 'solaar'
44 TRAY_ATTENTION = 'solaar-attention'
45
46
47 def _look_for_application_icons():
48 import os.path as _path
49 from os import environ as _environ
50
51 import sys as _sys
52 if _log.isEnabledFor(_DEBUG):
53 _log.debug("sys.path[0] = %s", _sys.path[0])
54 prefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))
55 src_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))
56 local_share = _environ.get('XDG_DATA_HOME', _path.expanduser(_path.join('~', '.local', 'share')))
57 data_dirs = _environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share')
58 repo_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', '..', 'share'))
59 del _sys
60
61 share_solaar = [prefix_share] + list(_path.join(x, 'solaar') for x in [src_share, local_share, repo_share] + data_dirs.split(':'))
62 for location in share_solaar:
63 location = _path.join(location, 'icons')
64 if _log.isEnabledFor(_DEBUG):
65 _log.debug("looking for icons in %s", location)
66
67 if _path.exists(_path.join(location, TRAY_ATTENTION + '.svg')):
68 yield location
69
70 del _environ
71 # del _path
72
73
74 _default_theme = None
75 _has_mint_icons = None
76 _has_gpm_icons = None
77 _has_oxygen_icons = None
78 _has_gnome_icons = None
79 _has_elementary_icons = None
80
81
82 def _init_icon_paths():
83 global _default_theme
84 if _default_theme:
85 return
86
87 _default_theme = Gtk.IconTheme.get_default()
88 for p in _look_for_application_icons():
89 _default_theme.prepend_search_path(p)
90 if _log.isEnabledFor(_DEBUG):
91 _log.debug("icon theme paths: %s", _default_theme.get_search_path())
92
93 global _has_mint_icons, _has_gpm_icons, _has_oxygen_icons, _has_gnome_icons, _has_elementary_icons
94
95 _has_mint_icons = _default_theme.has_icon('battery-good-symbolic')
96 _has_gpm_icons = _default_theme.has_icon('gpm-battery-020-charging')
97 _has_oxygen_icons = _default_theme.has_icon('battery-charging-caution') and \
98 _default_theme.has_icon('battery-charging-040')
99 _has_gnome_icons = _default_theme.has_icon('battery-caution-charging') and \
100 _default_theme.has_icon('battery-full-charged')
101 _has_elementary_icons = _default_theme.has_icon('battery-020-charging')
102
103 if _log.isEnabledFor(_DEBUG):
104 _log.debug("detected icon sets: Mint %s, gpm %s, oxygen %s, gnome %s, elementary %s",
105 _has_mint_icons, _has_gpm_icons, _has_oxygen_icons, _has_gnome_icons, _has_elementary_icons)
106
107 if (not _has_mint_icons and not _has_gpm_icons and not _has_oxygen_icons and
108 not _has_gnome_icons and not _has_elementary_icons):
109 _log.warning("failed to detect a known icon set")
110
111 #
112 #
113 #
114
115 def battery(level=None, charging=False):
116 icon_name = _battery_icon_name(level, charging)
117 if not _default_theme.has_icon(icon_name):
118 _log.warning("icon %s not found in current theme", icon_name);
119 # elif _log.isEnabledFor(_DEBUG):
120 # _log.debug("battery icon for %s:%s = %s", level, charging, icon_name)
121 return icon_name
122
123 # return first res where val >= guard
124 # _first_res(val,((guard,res),...))
125 def _first_res(val,pairs):
126 return next((res for guard,res in pairs if val >= guard),None)
127
128 def _battery_icon_name(level, charging):
129 _init_icon_paths()
130
131 if level is None or level < 0:
132 if _has_mint_icons:
133 return 'battery-missing-symbolic'
134 if _has_gpm_icons and _default_theme.has_icon('gpm-battery-missing'):
135 return 'gpm-battery-missing'
136 return 'battery-missing'
137
138 level_approx = 20 * ((level + 10) // 20)
139
140 if _has_mint_icons:
141 if level == 100 and charging:
142 return 'battery-full-charged-symbolic'
143 level_name = _first_res(level,((90,'full'), (50,'good'), (20,'low'), (5,'caution'), (0,'empty')))
144 return 'battery-%s%s-symbolic' % (level_name, '-charging' if charging else '')
145
146 if _has_gpm_icons:
147 if level == 100 and charging:
148 return 'gpm-battery-charged'
149 return 'gpm-battery-%03d%s' % (level_approx, '-charging' if charging else '')
150
151 if _has_oxygen_icons:
152 if level_approx == 100 and charging:
153 return 'battery-charging'
154 level_name = _first_res(level,((90,'100'), (75,'080'), (55,'060'), (35,'040'), (15,'low'), (0,'caution')))
155 return 'battery%s-%s' % ('-charging' if charging else '', level_name)
156
157 if _has_elementary_icons:
158 if level == 100 and charging:
159 return 'battery-charged'
160 return 'battery-%03d%s' % (level_approx, '-charging' if charging else '')
161
162 if _has_gnome_icons:
163 if level == 100 and charging:
164 return 'battery-full-charged'
165 if level_approx == 0 and charging:
166 return 'battery-caution-charging'
167 level_name = _first_res(level,((90,'full'), (50,'good'), (20,'low'), (5,'caution'), (0,'empty')))
168 return 'battery-%s%s' % (level_name, '-charging' if charging else '')
169
170 # fallback... most likely will fail
171 if level == 100 and charging:
172 return 'battery-charged'
173 return 'battery-%03d%s' % (level_approx, '-charging' if charging else '')
174
175 #
176 #
177 #
178
179 def lux(level=None):
180 if level is None or level < 0:
181 return 'light_unknown'
182 return 'light_%03d' % (20 * ((level + 50) // 100))
183
184 #
185 #
186 #
187
188 _ICON_SETS = {}
189
190 def device_icon_set(name='_', kind=None):
191 icon_set = _ICON_SETS.get(name)
192 if icon_set is None:
193 icon_set = Gtk.IconSet.new()
194 _ICON_SETS[name] = icon_set
195
196 # names of possible icons, in reverse order of likelihood
197 # the theme will hopefully pick up the most appropriate
198 names = ['preferences-desktop-peripherals']
199 if kind:
200 if str(kind) == 'numpad':
201 names += ('input-keyboard', 'input-dialpad')
202 elif str(kind) == 'touchpad':
203 names += ('input-mouse', 'input-tablet')
204 elif str(kind) == 'trackball':
205 names += ('input-mouse',)
206 names += ('input-' + str(kind),)
207 # names += (name.replace(' ', '-'),)
208
209 source = Gtk.IconSource.new()
210 for n in names:
211 source.set_icon_name(n)
212 icon_set.add_source(source)
213 icon_set.names = names
214
215 return icon_set
216
217
218 def device_icon_file(name, kind=None, size=_LARGE_SIZE):
219 _init_icon_paths()
220
221 icon_set = device_icon_set(name, kind)
222 assert icon_set
223 for n in reversed(icon_set.names):
224 if _default_theme.has_icon(n):
225 return _default_theme.lookup_icon(n, size, 0).get_filename()
226
227
228 def device_icon_name(name, kind=None):
229 _init_icon_paths()
230
231 icon_set = device_icon_set(name, kind)
232 assert icon_set
233 for n in reversed(icon_set.names):
234 if _default_theme.has_icon(n):
235 return n
236
237
238 def icon_file(name, size=_LARGE_SIZE):
239 _init_icon_paths()
240
241 # has_icon() somehow returned False while lookup_icon returns non-None.
242 # I guess it happens because share/solaar/icons/ has no hicolor and
243 # resolution subdirs
244 theme_icon = _default_theme.lookup_icon(name, size, 0)
245 if theme_icon:
246 file_name = theme_icon.get_filename()
247 # if _log.isEnabledFor(_DEBUG):
248 # _log.debug("icon %s(%d) => %s", name, size, file_name)
249 return file_name
250
251 _log.warn("icon %s(%d) not found in current theme", name, size)
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/solaar/ui/icons.py b/lib/solaar/ui/icons.py
--- a/lib/solaar/ui/icons.py
+++ b/lib/solaar/ui/icons.py
@@ -56,9 +56,10 @@
local_share = _environ.get('XDG_DATA_HOME', _path.expanduser(_path.join('~', '.local', 'share')))
data_dirs = _environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share')
repo_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', '..', 'share'))
+ setuptools_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', 'share'))
del _sys
- share_solaar = [prefix_share] + list(_path.join(x, 'solaar') for x in [src_share, local_share, repo_share] + data_dirs.split(':'))
+ share_solaar = [prefix_share] + list(_path.join(x, 'solaar') for x in [src_share, local_share, setuptools_share, repo_share] + data_dirs.split(':'))
for location in share_solaar:
location = _path.join(location, 'icons')
if _log.isEnabledFor(_DEBUG):
| {"golden_diff": "diff --git a/lib/solaar/ui/icons.py b/lib/solaar/ui/icons.py\n--- a/lib/solaar/ui/icons.py\n+++ b/lib/solaar/ui/icons.py\n@@ -56,9 +56,10 @@\n \tlocal_share = _environ.get('XDG_DATA_HOME', _path.expanduser(_path.join('~', '.local', 'share')))\n \tdata_dirs = _environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share')\n \trepo_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', '..', 'share'))\n+\tsetuptools_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', 'share'))\n \tdel _sys\n \n-\tshare_solaar = [prefix_share] + list(_path.join(x, 'solaar') for x in [src_share, local_share, repo_share] + data_dirs.split(':'))\n+\tshare_solaar = [prefix_share] + list(_path.join(x, 'solaar') for x in [src_share, local_share, setuptools_share, repo_share] + data_dirs.split(':'))\n \tfor location in share_solaar:\n \t\tlocation = _path.join(location, 'icons')\n \t\tif _log.isEnabledFor(_DEBUG):\n", "issue": "can't find icons with setuptools `sudo python3 setup.py install`\nsetuptools changes where data files are relative to the installation for `python3 setup.py install` which means that the icon directory is not found.\n", "before_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger, DEBUG as _DEBUG\n_log = getLogger(__name__)\ndel getLogger\n\nfrom gi.repository import Gtk\n\n#\n#\n#\n\n_LARGE_SIZE = 64\nGtk.IconSize.LARGE = Gtk.icon_size_register('large', _LARGE_SIZE, _LARGE_SIZE)\n# Gtk.IconSize.XLARGE = Gtk.icon_size_register('x-large', _LARGE_SIZE * 2, _LARGE_SIZE * 2)\n# print (\"menu\", int(Gtk.IconSize.MENU), Gtk.icon_size_lookup(Gtk.IconSize.MENU))\n# print (\"small toolbar\", int(Gtk.IconSize.SMALL_TOOLBAR), Gtk.icon_size_lookup(Gtk.IconSize.SMALL_TOOLBAR))\n# print (\"button\", int(Gtk.IconSize.BUTTON), Gtk.icon_size_lookup(Gtk.IconSize.BUTTON))\n# print (\"large toolbar\", int(Gtk.IconSize.LARGE_TOOLBAR), Gtk.icon_size_lookup(Gtk.IconSize.LARGE_TOOLBAR))\n# print (\"dnd\", int(Gtk.IconSize.DND), Gtk.icon_size_lookup(Gtk.IconSize.DND))\n# print (\"dialog\", int(Gtk.IconSize.DIALOG), Gtk.icon_size_lookup(Gtk.IconSize.DIALOG))\n\nTRAY_INIT = 'solaar-init'\nTRAY_OKAY = 'solaar'\nTRAY_ATTENTION = 'solaar-attention'\n\n\ndef _look_for_application_icons():\n\timport os.path as _path\n\tfrom os import environ as _environ\n\n\timport sys as _sys\n\tif _log.isEnabledFor(_DEBUG):\n\t\t_log.debug(\"sys.path[0] = %s\", _sys.path[0])\n\tprefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))\n\tsrc_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))\n\tlocal_share = _environ.get('XDG_DATA_HOME', 
_path.expanduser(_path.join('~', '.local', 'share')))\n\tdata_dirs = _environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share')\n\trepo_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', '..', 'share'))\n\tdel _sys\n\n\tshare_solaar = [prefix_share] + list(_path.join(x, 'solaar') for x in [src_share, local_share, repo_share] + data_dirs.split(':'))\n\tfor location in share_solaar:\n\t\tlocation = _path.join(location, 'icons')\n\t\tif _log.isEnabledFor(_DEBUG):\n\t\t\t_log.debug(\"looking for icons in %s\", location)\n\n\t\tif _path.exists(_path.join(location, TRAY_ATTENTION + '.svg')):\n\t\t\tyield location\n\n\tdel _environ\n\t# del _path\n\n\n_default_theme = None\n_has_mint_icons = None\n_has_gpm_icons = None\n_has_oxygen_icons = None\n_has_gnome_icons = None\n_has_elementary_icons = None\n\n\ndef _init_icon_paths():\n\tglobal _default_theme\n\tif _default_theme:\n\t\treturn\n\n\t_default_theme = Gtk.IconTheme.get_default()\n\tfor p in _look_for_application_icons():\n\t\t_default_theme.prepend_search_path(p)\n\tif _log.isEnabledFor(_DEBUG):\n\t\t_log.debug(\"icon theme paths: %s\", _default_theme.get_search_path())\n\n\tglobal _has_mint_icons, _has_gpm_icons, _has_oxygen_icons, _has_gnome_icons, _has_elementary_icons\n\n\t_has_mint_icons = _default_theme.has_icon('battery-good-symbolic')\n\t_has_gpm_icons = _default_theme.has_icon('gpm-battery-020-charging')\n\t_has_oxygen_icons = _default_theme.has_icon('battery-charging-caution') and \\\n\t\t\t\t\t\t_default_theme.has_icon('battery-charging-040')\n\t_has_gnome_icons = _default_theme.has_icon('battery-caution-charging') and \\\n\t\t\t\t\t\t_default_theme.has_icon('battery-full-charged')\n\t_has_elementary_icons = _default_theme.has_icon('battery-020-charging')\n\n\tif _log.isEnabledFor(_DEBUG):\n\t\t_log.debug(\"detected icon sets: Mint %s, gpm %s, oxygen %s, gnome %s, elementary %s\",\n\t\t\t\t\t\t_has_mint_icons, _has_gpm_icons, _has_oxygen_icons, _has_gnome_icons, _has_elementary_icons)\n\n\tif (not _has_mint_icons and not _has_gpm_icons and not _has_oxygen_icons and\n\t\tnot _has_gnome_icons and not _has_elementary_icons):\n\t\t_log.warning(\"failed to detect a known icon set\")\n\n#\n#\n#\n\ndef battery(level=None, charging=False):\n\ticon_name = _battery_icon_name(level, charging)\n\tif not _default_theme.has_icon(icon_name):\n\t\t_log.warning(\"icon %s not found in current theme\", icon_name);\n\t# elif _log.isEnabledFor(_DEBUG):\n\t# \t_log.debug(\"battery icon for %s:%s = %s\", level, charging, icon_name)\n\treturn icon_name\n\n# return first res where val >= guard\n# _first_res(val,((guard,res),...))\ndef _first_res(val,pairs):\n\treturn next((res for guard,res in pairs if val >= guard),None)\n\ndef _battery_icon_name(level, charging):\n\t_init_icon_paths()\n\n\tif level is None or level < 0:\n\t\tif _has_mint_icons:\n\t\t\treturn 'battery-missing-symbolic'\n\t\tif _has_gpm_icons and _default_theme.has_icon('gpm-battery-missing'):\n\t\t\treturn 'gpm-battery-missing'\n\t\treturn 'battery-missing'\n\n\tlevel_approx = 20 * ((level + 10) // 20)\n\n\tif _has_mint_icons:\n\t\tif level == 100 and charging:\n\t\t\treturn 'battery-full-charged-symbolic'\n\t\tlevel_name = _first_res(level,((90,'full'), (50,'good'), (20,'low'), (5,'caution'), (0,'empty')))\n\t\treturn 'battery-%s%s-symbolic' % (level_name, '-charging' if charging else '')\n\n\tif _has_gpm_icons:\n\t\tif level == 100 and charging:\n\t\t\treturn 'gpm-battery-charged'\n\t\treturn 'gpm-battery-%03d%s' % (level_approx, '-charging' if charging else 
'')\n\n\tif _has_oxygen_icons:\n\t\tif level_approx == 100 and charging:\n\t\t\treturn 'battery-charging'\n\t\tlevel_name = _first_res(level,((90,'100'), (75,'080'), (55,'060'), (35,'040'), (15,'low'), (0,'caution')))\n\t\treturn 'battery%s-%s' % ('-charging' if charging else '', level_name)\n\n\tif _has_elementary_icons:\n\t\tif level == 100 and charging:\n\t\t\treturn 'battery-charged'\n\t\treturn 'battery-%03d%s' % (level_approx, '-charging' if charging else '')\n\n\tif _has_gnome_icons:\n\t\tif level == 100 and charging:\n\t\t\treturn 'battery-full-charged'\n\t\tif level_approx == 0 and charging:\n\t\t\treturn 'battery-caution-charging'\n\t\tlevel_name = _first_res(level,((90,'full'), (50,'good'), (20,'low'), (5,'caution'), (0,'empty')))\n\t\treturn 'battery-%s%s' % (level_name, '-charging' if charging else '')\n\n\t# fallback... most likely will fail\n\tif level == 100 and charging:\n\t\treturn 'battery-charged'\n\treturn 'battery-%03d%s' % (level_approx, '-charging' if charging else '')\n\n#\n#\n#\n\ndef lux(level=None):\n\tif level is None or level < 0:\n\t\treturn 'light_unknown'\n\treturn 'light_%03d' % (20 * ((level + 50) // 100))\n\n#\n#\n#\n\n_ICON_SETS = {}\n\ndef device_icon_set(name='_', kind=None):\n\ticon_set = _ICON_SETS.get(name)\n\tif icon_set is None:\n\t\ticon_set = Gtk.IconSet.new()\n\t\t_ICON_SETS[name] = icon_set\n\n\t\t# names of possible icons, in reverse order of likelihood\n\t\t# the theme will hopefully pick up the most appropriate\n\t\tnames = ['preferences-desktop-peripherals']\n\t\tif kind:\n\t\t\tif str(kind) == 'numpad':\n\t\t\t\tnames += ('input-keyboard', 'input-dialpad')\n\t\t\telif str(kind) == 'touchpad':\n\t\t\t\tnames += ('input-mouse', 'input-tablet')\n\t\t\telif str(kind) == 'trackball':\n\t\t\t\tnames += ('input-mouse',)\n\t\t\tnames += ('input-' + str(kind),)\n\t\t# names += (name.replace(' ', '-'),)\n\n\t\tsource = Gtk.IconSource.new()\n\t\tfor n in names:\n\t\t\tsource.set_icon_name(n)\n\t\t\ticon_set.add_source(source)\n\t\ticon_set.names = names\n\n\treturn icon_set\n\n\ndef device_icon_file(name, kind=None, size=_LARGE_SIZE):\n\t_init_icon_paths()\n\n\ticon_set = device_icon_set(name, kind)\n\tassert icon_set\n\tfor n in reversed(icon_set.names):\n\t\tif _default_theme.has_icon(n):\n\t\t\treturn _default_theme.lookup_icon(n, size, 0).get_filename()\n\n\ndef device_icon_name(name, kind=None):\n\t_init_icon_paths()\n\n\ticon_set = device_icon_set(name, kind)\n\tassert icon_set\n\tfor n in reversed(icon_set.names):\n\t\tif _default_theme.has_icon(n):\n\t\t\treturn n\n\n\ndef icon_file(name, size=_LARGE_SIZE):\n\t_init_icon_paths()\n\n # has_icon() somehow returned False while lookup_icon returns non-None.\n # I guess it happens because share/solaar/icons/ has no hicolor and\n # resolution subdirs\n\ttheme_icon = _default_theme.lookup_icon(name, size, 0)\n\tif theme_icon:\n\t\tfile_name = theme_icon.get_filename()\n\t\t# if _log.isEnabledFor(_DEBUG):\n\t\t# \t_log.debug(\"icon %s(%d) => %s\", name, size, file_name)\n\t\treturn file_name\n\n\t_log.warn(\"icon %s(%d) not found in current theme\", name, size)\n", "path": "lib/solaar/ui/icons.py"}], "after_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is 
distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger, DEBUG as _DEBUG\n_log = getLogger(__name__)\ndel getLogger\n\nfrom gi.repository import Gtk\n\n#\n#\n#\n\n_LARGE_SIZE = 64\nGtk.IconSize.LARGE = Gtk.icon_size_register('large', _LARGE_SIZE, _LARGE_SIZE)\n# Gtk.IconSize.XLARGE = Gtk.icon_size_register('x-large', _LARGE_SIZE * 2, _LARGE_SIZE * 2)\n# print (\"menu\", int(Gtk.IconSize.MENU), Gtk.icon_size_lookup(Gtk.IconSize.MENU))\n# print (\"small toolbar\", int(Gtk.IconSize.SMALL_TOOLBAR), Gtk.icon_size_lookup(Gtk.IconSize.SMALL_TOOLBAR))\n# print (\"button\", int(Gtk.IconSize.BUTTON), Gtk.icon_size_lookup(Gtk.IconSize.BUTTON))\n# print (\"large toolbar\", int(Gtk.IconSize.LARGE_TOOLBAR), Gtk.icon_size_lookup(Gtk.IconSize.LARGE_TOOLBAR))\n# print (\"dnd\", int(Gtk.IconSize.DND), Gtk.icon_size_lookup(Gtk.IconSize.DND))\n# print (\"dialog\", int(Gtk.IconSize.DIALOG), Gtk.icon_size_lookup(Gtk.IconSize.DIALOG))\n\nTRAY_INIT = 'solaar-init'\nTRAY_OKAY = 'solaar'\nTRAY_ATTENTION = 'solaar-attention'\n\n\ndef _look_for_application_icons():\n\timport os.path as _path\n\tfrom os import environ as _environ\n\n\timport sys as _sys\n\tif _log.isEnabledFor(_DEBUG):\n\t\t_log.debug(\"sys.path[0] = %s\", _sys.path[0])\n\tprefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))\n\tsrc_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))\n\tlocal_share = _environ.get('XDG_DATA_HOME', _path.expanduser(_path.join('~', '.local', 'share')))\n\tdata_dirs = _environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share')\n\trepo_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', '..', 'share'))\n\tsetuptools_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', 'share'))\n\tdel _sys\n\n\tshare_solaar = [prefix_share] + list(_path.join(x, 'solaar') for x in [src_share, local_share, setuptools_share, repo_share] + data_dirs.split(':'))\n\tfor location in share_solaar:\n\t\tlocation = _path.join(location, 'icons')\n\t\tif _log.isEnabledFor(_DEBUG):\n\t\t\t_log.debug(\"looking for icons in %s\", location)\n\n\t\tif _path.exists(_path.join(location, TRAY_ATTENTION + '.svg')):\n\t\t\tyield location\n\n\tdel _environ\n\t# del _path\n\n\n_default_theme = None\n_has_mint_icons = None\n_has_gpm_icons = None\n_has_oxygen_icons = None\n_has_gnome_icons = None\n_has_elementary_icons = None\n\n\ndef _init_icon_paths():\n\tglobal _default_theme\n\tif _default_theme:\n\t\treturn\n\n\t_default_theme = Gtk.IconTheme.get_default()\n\tfor p in _look_for_application_icons():\n\t\t_default_theme.prepend_search_path(p)\n\tif _log.isEnabledFor(_DEBUG):\n\t\t_log.debug(\"icon theme paths: %s\", _default_theme.get_search_path())\n\n\tglobal _has_mint_icons, _has_gpm_icons, _has_oxygen_icons, _has_gnome_icons, _has_elementary_icons\n\n\t_has_mint_icons = _default_theme.has_icon('battery-good-symbolic')\n\t_has_gpm_icons = _default_theme.has_icon('gpm-battery-020-charging')\n\t_has_oxygen_icons = _default_theme.has_icon('battery-charging-caution') and 
\\\n\t\t\t\t\t\t_default_theme.has_icon('battery-charging-040')\n\t_has_gnome_icons = _default_theme.has_icon('battery-caution-charging') and \\\n\t\t\t\t\t\t_default_theme.has_icon('battery-full-charged')\n\t_has_elementary_icons = _default_theme.has_icon('battery-020-charging')\n\n\tif _log.isEnabledFor(_DEBUG):\n\t\t_log.debug(\"detected icon sets: Mint %s, gpm %s, oxygen %s, gnome %s, elementary %s\",\n\t\t\t\t\t\t_has_mint_icons, _has_gpm_icons, _has_oxygen_icons, _has_gnome_icons, _has_elementary_icons)\n\n\tif (not _has_mint_icons and not _has_gpm_icons and not _has_oxygen_icons and\n\t\tnot _has_gnome_icons and not _has_elementary_icons):\n\t\t_log.warning(\"failed to detect a known icon set\")\n\n#\n#\n#\n\ndef battery(level=None, charging=False):\n\ticon_name = _battery_icon_name(level, charging)\n\tif not _default_theme.has_icon(icon_name):\n\t\t_log.warning(\"icon %s not found in current theme\", icon_name);\n\t# elif _log.isEnabledFor(_DEBUG):\n\t# \t_log.debug(\"battery icon for %s:%s = %s\", level, charging, icon_name)\n\treturn icon_name\n\n# return first res where val >= guard\n# _first_res(val,((guard,res),...))\ndef _first_res(val,pairs):\n\treturn next((res for guard,res in pairs if val >= guard),None)\n\ndef _battery_icon_name(level, charging):\n\t_init_icon_paths()\n\n\tif level is None or level < 0:\n\t\tif _has_mint_icons:\n\t\t\treturn 'battery-missing-symbolic'\n\t\tif _has_gpm_icons and _default_theme.has_icon('gpm-battery-missing'):\n\t\t\treturn 'gpm-battery-missing'\n\t\treturn 'battery-missing'\n\n\tlevel_approx = 20 * ((level + 10) // 20)\n\n\tif _has_mint_icons:\n\t\tif level == 100 and charging:\n\t\t\treturn 'battery-full-charged-symbolic'\n\t\tlevel_name = _first_res(level,((90,'full'), (50,'good'), (20,'low'), (5,'caution'), (0,'empty')))\n\t\treturn 'battery-%s%s-symbolic' % (level_name, '-charging' if charging else '')\n\n\tif _has_gpm_icons:\n\t\tif level == 100 and charging:\n\t\t\treturn 'gpm-battery-charged'\n\t\treturn 'gpm-battery-%03d%s' % (level_approx, '-charging' if charging else '')\n\n\tif _has_oxygen_icons:\n\t\tif level_approx == 100 and charging:\n\t\t\treturn 'battery-charging'\n\t\tlevel_name = _first_res(level,((90,'100'), (75,'080'), (55,'060'), (35,'040'), (15,'low'), (0,'caution')))\n\t\treturn 'battery%s-%s' % ('-charging' if charging else '', level_name)\n\n\tif _has_elementary_icons:\n\t\tif level == 100 and charging:\n\t\t\treturn 'battery-charged'\n\t\treturn 'battery-%03d%s' % (level_approx, '-charging' if charging else '')\n\n\tif _has_gnome_icons:\n\t\tif level == 100 and charging:\n\t\t\treturn 'battery-full-charged'\n\t\tif level_approx == 0 and charging:\n\t\t\treturn 'battery-caution-charging'\n\t\tlevel_name = _first_res(level,((90,'full'), (50,'good'), (20,'low'), (5,'caution'), (0,'empty')))\n\t\treturn 'battery-%s%s' % (level_name, '-charging' if charging else '')\n\n\t# fallback... 
most likely will fail\n\tif level == 100 and charging:\n\t\treturn 'battery-charged'\n\treturn 'battery-%03d%s' % (level_approx, '-charging' if charging else '')\n\n#\n#\n#\n\ndef lux(level=None):\n\tif level is None or level < 0:\n\t\treturn 'light_unknown'\n\treturn 'light_%03d' % (20 * ((level + 50) // 100))\n\n#\n#\n#\n\n_ICON_SETS = {}\n\ndef device_icon_set(name='_', kind=None):\n\ticon_set = _ICON_SETS.get(name)\n\tif icon_set is None:\n\t\ticon_set = Gtk.IconSet.new()\n\t\t_ICON_SETS[name] = icon_set\n\n\t\t# names of possible icons, in reverse order of likelihood\n\t\t# the theme will hopefully pick up the most appropriate\n\t\tnames = ['preferences-desktop-peripherals']\n\t\tif kind:\n\t\t\tif str(kind) == 'numpad':\n\t\t\t\tnames += ('input-keyboard', 'input-dialpad')\n\t\t\telif str(kind) == 'touchpad':\n\t\t\t\tnames += ('input-mouse', 'input-tablet')\n\t\t\telif str(kind) == 'trackball':\n\t\t\t\tnames += ('input-mouse',)\n\t\t\tnames += ('input-' + str(kind),)\n\t\t# names += (name.replace(' ', '-'),)\n\n\t\tsource = Gtk.IconSource.new()\n\t\tfor n in names:\n\t\t\tsource.set_icon_name(n)\n\t\t\ticon_set.add_source(source)\n\t\ticon_set.names = names\n\n\treturn icon_set\n\n\ndef device_icon_file(name, kind=None, size=_LARGE_SIZE):\n\t_init_icon_paths()\n\n\ticon_set = device_icon_set(name, kind)\n\tassert icon_set\n\tfor n in reversed(icon_set.names):\n\t\tif _default_theme.has_icon(n):\n\t\t\treturn _default_theme.lookup_icon(n, size, 0).get_filename()\n\n\ndef device_icon_name(name, kind=None):\n\t_init_icon_paths()\n\n\ticon_set = device_icon_set(name, kind)\n\tassert icon_set\n\tfor n in reversed(icon_set.names):\n\t\tif _default_theme.has_icon(n):\n\t\t\treturn n\n\n\ndef icon_file(name, size=_LARGE_SIZE):\n\t_init_icon_paths()\n\n # has_icon() somehow returned False while lookup_icon returns non-None.\n # I guess it happens because share/solaar/icons/ has no hicolor and\n # resolution subdirs\n\ttheme_icon = _default_theme.lookup_icon(name, size, 0)\n\tif theme_icon:\n\t\tfile_name = theme_icon.get_filename()\n\t\t# if _log.isEnabledFor(_DEBUG):\n\t\t# \t_log.debug(\"icon %s(%d) => %s\", name, size, file_name)\n\t\treturn file_name\n\n\t_log.warn(\"icon %s(%d) not found in current theme\", name, size)\n", "path": "lib/solaar/ui/icons.py"}]} | 3,446 | 272 |
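A minimal sketch of the path logic behind the solaar fix above, for readers skimming the compressed diff: with a `python3 setup.py install` (setuptools) layout, the `share/` data directory presumably ends up two levels above `icons.py` rather than three levels up as in a source checkout, so the patch adds one extra candidate to the icon search path. The variable names mirror the patched module; the layout interpretation is an assumption on my part.

```python
import os.path as _path

# Location used when running from a source checkout: lib/solaar/ui -> ../../../share
repo_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', '..', 'share'))

# Extra location for a setuptools install, where the data files are assumed to
# sit next to the top-level package: lib/solaar/ui -> ../../share
setuptools_share = _path.normpath(_path.join(_path.dirname(__file__), '..', '..', 'share'))

# Both candidates are added to the list of share/solaar directories that is
# then searched for an icons/ subdirectory containing solaar-attention.svg.
```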
gh_patches_debug_20819 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2062 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect insights.components.rhel_version module doc
The [example](https://github.com/RedHatInsights/insights-core/blob/master/insights/components/rhel_version.py#L17) in the insights.components.rhel_version pydoc doesn't work. The objects don't have a `rhel_version` attribute.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/components/rhel_version.py`
Content:
```
1 """
2 IsRhel6, IsRhel7 and IsRhel8
3 ===============================
4
5 The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to
6 retrieve the RHEL version information.
7 Each component checks if the release version matches the version it represents,
8 if the version does not match what is expected the class raises ``SkipComponent``
9 so that the dependent component will not fire.
10 Can be added as a dependency of a parser so that the parser only fires if the
11 ``IsRhel*`` dependency is met.
12
13 An example from the following ``/etc/redhat_release`` file output::
14
15 Red Hat Enterprise Linux release 8.0 (Ootpa)
16
17 Example:
18
19 >>> type(IsRhel8)
20 <class 'insights.components.rhel_version.Is_Rhel8'>
21 >>> is_rhel8.rhel_version
22 '8.0'
23 """
24
25 from insights.core.plugins import component
26 from insights.combiners.redhat_release import RedHatRelease
27 from insights.core.dr import SkipComponent
28
29
30 @component(RedHatRelease)
31 class IsRhel6(object):
32 """
33 This component uses ``RedHatRelease`` combiner
34 to determine RHEL version. It checks if RHEL6, if not
35 RHEL6 it raises ``SkipComponent``.
36
37 Raises:
38 SkipComponent: When RHEL version is not RHEL6.
39 """
40 def __init__(self, rhel):
41 if rhel.major != 6:
42 raise SkipComponent('Not RHEL6')
43
44
45 @component(RedHatRelease)
46 class IsRhel7(object):
47 """
48 This component uses ``RedHatRelease`` combiner
49 to determine RHEL version. It checks if RHEL7, if not \
50 RHEL7 it raises ``SkipComponent``.
51
52 Raises:
53 SkipComponent: When RHEL version is not RHEL7.
54 """
55 def __init__(self, rhel):
56 if rhel.major != 7:
57 raise SkipComponent('Not RHEL7')
58
59
60 @component(RedHatRelease)
61 class IsRhel8(object):
62 """
63 This component uses ``RedhatRelease`` combiner
64 to determine RHEL version. It checks if RHEL8, if not
65 RHEL8 it raises ``SkipComponent``.
66
67 Raises:
68 SkipComponent: When RHEL version is not RHEL8.
69 """
70 def __init__(self, rhel):
71 if rhel.major != 8:
72 raise SkipComponent('Not RHEL8')
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py
--- a/insights/components/rhel_version.py
+++ b/insights/components/rhel_version.py
@@ -2,24 +2,14 @@
IsRhel6, IsRhel7 and IsRhel8
===============================
-The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to
-retrieve the RHEL version information.
-Each component checks if the release version matches the version it represents,
-if the version does not match what is expected the class raises ``SkipComponent``
-so that the dependent component will not fire.
-Can be added as a dependency of a parser so that the parser only fires if the
-``IsRhel*`` dependency is met.
-
-An example from the following ``/etc/redhat_release`` file output::
-
- Red Hat Enterprise Linux release 8.0 (Ootpa)
-
-Example:
-
- >>> type(IsRhel8)
- <class 'insights.components.rhel_version.Is_Rhel8'>
- >>> is_rhel8.rhel_version
- '8.0'
+An ``IsRhel*`` component is valid if the
+:py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates
+the major RHEL version represented by the component. Otherwise, it raises a
+:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from
+executing.
+
+In particular, an ``IsRhel*`` component can be added as a dependency of a
+parser to limit it to a given version.
"""
from insights.core.plugins import component
| {"golden_diff": "diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py\n--- a/insights/components/rhel_version.py\n+++ b/insights/components/rhel_version.py\n@@ -2,24 +2,14 @@\n IsRhel6, IsRhel7 and IsRhel8\n ===============================\n \n-The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to\n-retrieve the RHEL version information.\n-Each component checks if the release version matches the version it represents,\n-if the version does not match what is expected the class raises ``SkipComponent``\n-so that the dependent component will not fire.\n-Can be added as a dependency of a parser so that the parser only fires if the\n-``IsRhel*`` dependency is met.\n-\n-An example from the following ``/etc/redhat_release`` file output::\n-\n- Red Hat Enterprise Linux release 8.0 (Ootpa)\n-\n-Example:\n-\n- >>> type(IsRhel8)\n- <class 'insights.components.rhel_version.Is_Rhel8'>\n- >>> is_rhel8.rhel_version\n- '8.0'\n+An ``IsRhel*`` component is valid if the\n+:py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates\n+the major RHEL version represented by the component. Otherwise, it raises a\n+:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from\n+executing.\n+\n+In particular, an ``IsRhel*`` component can be added as a dependency of a\n+parser to limit it to a given version.\n \"\"\"\n \n from insights.core.plugins import component\n", "issue": "Incorrect insights.components.rhel_version module doc\nThe [example](https://github.com/RedHatInsights/insights-core/blob/master/insights/components/rhel_version.py#L17) in the insights.components.rhel_version pydoc doesn't work. The objects don't have a `rhel_version` attribute.\n", "before_files": [{"content": "\"\"\"\nIsRhel6, IsRhel7 and IsRhel8\n===============================\n\nThe ``IsRhel*`` components each use the ``RedhatRelease`` combiner to\nretrieve the RHEL version information.\nEach component checks if the release version matches the version it represents,\nif the version does not match what is expected the class raises ``SkipComponent``\nso that the dependent component will not fire.\nCan be added as a dependency of a parser so that the parser only fires if the\n``IsRhel*`` dependency is met.\n\nAn example from the following ``/etc/redhat_release`` file output::\n\n Red Hat Enterprise Linux release 8.0 (Ootpa)\n\nExample:\n\n >>> type(IsRhel8)\n <class 'insights.components.rhel_version.Is_Rhel8'>\n >>> is_rhel8.rhel_version\n '8.0'\n\"\"\"\n\nfrom insights.core.plugins import component\nfrom insights.combiners.redhat_release import RedHatRelease\nfrom insights.core.dr import SkipComponent\n\n\n@component(RedHatRelease)\nclass IsRhel6(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. It checks if RHEL6, if not\n RHEL6 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL6.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 6:\n raise SkipComponent('Not RHEL6')\n\n\n@component(RedHatRelease)\nclass IsRhel7(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. 
It checks if RHEL7, if not \\\n RHEL7 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL7.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 7:\n raise SkipComponent('Not RHEL7')\n\n\n@component(RedHatRelease)\nclass IsRhel8(object):\n \"\"\"\n This component uses ``RedhatRelease`` combiner\n to determine RHEL version. It checks if RHEL8, if not\n RHEL8 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL8.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 8:\n raise SkipComponent('Not RHEL8')\n", "path": "insights/components/rhel_version.py"}], "after_files": [{"content": "\"\"\"\nIsRhel6, IsRhel7 and IsRhel8\n===============================\n\nAn ``IsRhel*`` component is valid if the\n:py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates\nthe major RHEL version represented by the component. Otherwise, it raises a\n:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from\nexecuting.\n\nIn particular, an ``IsRhel*`` component can be added as a dependency of a\nparser to limit it to a given version.\n\"\"\"\n\nfrom insights.core.plugins import component\nfrom insights.combiners.redhat_release import RedHatRelease\nfrom insights.core.dr import SkipComponent\n\n\n@component(RedHatRelease)\nclass IsRhel6(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. It checks if RHEL6, if not\n RHEL6 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL6.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 6:\n raise SkipComponent('Not RHEL6')\n\n\n@component(RedHatRelease)\nclass IsRhel7(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. It checks if RHEL7, if not \\\n RHEL7 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL7.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 7:\n raise SkipComponent('Not RHEL7')\n\n\n@component(RedHatRelease)\nclass IsRhel8(object):\n \"\"\"\n This component uses ``RedhatRelease`` combiner\n to determine RHEL version. It checks if RHEL8, if not\n RHEL8 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL8.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 8:\n raise SkipComponent('Not RHEL8')\n", "path": "insights/components/rhel_version.py"}]} | 1,003 | 365 |
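To make the corrected `insights.components.rhel_version` documentation concrete: the `IsRhel*` classes expose no version attribute; their only job is to raise `SkipComponent` on the wrong major release, so downstream components simply declare them as dependencies. A minimal sketch of that pattern, reusing the `@component` dependency syntax the module itself uses with `RedHatRelease` (the class name below is made up):

```python
from insights.core.plugins import component
from insights.components.rhel_version import IsRhel8


@component(IsRhel8)
class Rhel8OnlyCheck(object):
    """Runs only on hosts where the RedHatRelease combiner reports major version 8.

    On any other release, IsRhel8 raises SkipComponent and this component
    never executes.
    """

    def __init__(self, is_rhel8):
        self.applies_to_rhel8 = True
```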
gh_patches_debug_20533 | rasdani/github-patches | git_diff | nilearn__nilearn-2264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strange comment in ICA example
This is the comment:
# XXX: must get the code to run for more than 1 subject
And this is the link to the example:
https://nilearn.github.io/auto_examples/05_advanced/plot_ica_resting_state.html
The comment appears to relate to an internal to-do rather than a comment or instruction for users.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/05_advanced/plot_ica_resting_state.py`
Content:
```
1 """
2 Multivariate decompositions: Independent component analysis of fMRI
3 ===================================================================
4
5
6 This example is meant to demonstrate nilearn as a low-level tools used to
7 combine feature extraction with a multivariate decomposition algorithm
8 for movie-watching.
9
10 This example is a toy. To apply ICA to fmri timeseries data, it is advised
11 to look at the example
12 :ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_decomposition.py`.
13
14 The example here applies the scikit-learn ICA to movie watching timeseries data.
15 Note that following the code in the example, any unsupervised
16 decomposition model, or other latent-factor models, can be applied to
17 the data, as the scikit-learn API enables to exchange them as almost
18 black box (though the relevant parameter for brain maps might no longer
19 be given by a call to fit_transform).
20
21 """
22
23 #####################################################################
24 # Load movie watching dataset
25 from nilearn import datasets
26 # Here we use only single subject to get faster-running code. For better
27 # results, simply increase this number
28 # XXX: must get the code to run for more than 1 subject
29 dataset = datasets.fetch_development_fmri(n_subjects=1)
30 func_filename = dataset.func[0]
31
32 # print basic information on the dataset
33 print('First subject functional nifti image (4D) is at: %s' %
34 dataset.func[0]) # 4D data
35
36
37 #####################################################################
38 # Preprocess
39 from nilearn.input_data import NiftiMasker
40
41 # This is fmri timeseries data: the background has not been removed yet,
42 # thus we need to use mask_strategy='epi' to compute the mask from the
43 # EPI images
44 masker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,
45 mask_strategy='epi', standardize=True)
46 data_masked = masker.fit_transform(func_filename)
47
48 # Concatenate all the subjects
49 # fmri_data = np.concatenate(data_masked, axis=1)
50 fmri_data = data_masked
51
52
53 #####################################################################
54 # Apply ICA
55
56 from sklearn.decomposition import FastICA
57 n_components = 10
58 ica = FastICA(n_components=n_components, random_state=42)
59 components_masked = ica.fit_transform(data_masked.T).T
60
61 # Normalize estimated components, for thresholding to make sense
62 components_masked -= components_masked.mean(axis=0)
63 components_masked /= components_masked.std(axis=0)
64 # Threshold
65 import numpy as np
66 components_masked[np.abs(components_masked) < .8] = 0
67
68 # Now invert the masking operation, going back to a full 3D
69 # representation
70 component_img = masker.inverse_transform(components_masked)
71
72 #####################################################################
73 # Visualize the results
74
75 # Show some interesting components
76 from nilearn import image
77 from nilearn.plotting import plot_stat_map, show
78
79 # Use the mean as a background
80 mean_img = image.mean_img(func_filename)
81
82 plot_stat_map(image.index_img(component_img, 0), mean_img)
83
84 plot_stat_map(image.index_img(component_img, 1), mean_img)
85
86 show()
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/05_advanced/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py
--- a/examples/05_advanced/plot_ica_resting_state.py
+++ b/examples/05_advanced/plot_ica_resting_state.py
@@ -23,9 +23,7 @@
#####################################################################
# Load movie watching dataset
from nilearn import datasets
-# Here we use only single subject to get faster-running code. For better
-# results, simply increase this number
-# XXX: must get the code to run for more than 1 subject
+# Here we use only single subject to get faster-running code.
dataset = datasets.fetch_development_fmri(n_subjects=1)
func_filename = dataset.func[0]
@@ -45,10 +43,6 @@
mask_strategy='epi', standardize=True)
data_masked = masker.fit_transform(func_filename)
-# Concatenate all the subjects
-# fmri_data = np.concatenate(data_masked, axis=1)
-fmri_data = data_masked
-
#####################################################################
# Apply ICA
| {"golden_diff": "diff --git a/examples/05_advanced/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py\n--- a/examples/05_advanced/plot_ica_resting_state.py\n+++ b/examples/05_advanced/plot_ica_resting_state.py\n@@ -23,9 +23,7 @@\n #####################################################################\n # Load movie watching dataset\n from nilearn import datasets\n-# Here we use only single subject to get faster-running code. For better\n-# results, simply increase this number\n-# XXX: must get the code to run for more than 1 subject\n+# Here we use only single subject to get faster-running code.\n dataset = datasets.fetch_development_fmri(n_subjects=1)\n func_filename = dataset.func[0]\n \n@@ -45,10 +43,6 @@\n mask_strategy='epi', standardize=True)\n data_masked = masker.fit_transform(func_filename)\n \n-# Concatenate all the subjects\n-# fmri_data = np.concatenate(data_masked, axis=1)\n-fmri_data = data_masked\n-\n \n #####################################################################\n # Apply ICA\n", "issue": "Strange comment in ICA example\nThis is the comment:\r\n# XXX: must get the code to run for more than 1 subject\r\nAnd this is the link to the example:\r\nhttps://nilearn.github.io/auto_examples/05_advanced/plot_ica_resting_state.html\r\n\r\nThe comment appears to relate to an internal todo rather than comment/instruction to users.\n", "before_files": [{"content": "\"\"\"\nMultivariate decompositions: Independent component analysis of fMRI\n===================================================================\n\n\nThis example is meant to demonstrate nilearn as a low-level tools used to\ncombine feature extraction with a multivariate decomposition algorithm\nfor movie-watching.\n\nThis example is a toy. To apply ICA to fmri timeseries data, it is advised\nto look at the example\n:ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_decomposition.py`.\n\nThe example here applies the scikit-learn ICA to movie watching timeseries data.\nNote that following the code in the example, any unsupervised\ndecomposition model, or other latent-factor models, can be applied to\nthe data, as the scikit-learn API enables to exchange them as almost\nblack box (though the relevant parameter for brain maps might no longer\nbe given by a call to fit_transform).\n\n\"\"\"\n\n#####################################################################\n# Load movie watching dataset\nfrom nilearn import datasets\n# Here we use only single subject to get faster-running code. 
For better\n# results, simply increase this number\n# XXX: must get the code to run for more than 1 subject\ndataset = datasets.fetch_development_fmri(n_subjects=1)\nfunc_filename = dataset.func[0]\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n dataset.func[0]) # 4D data\n\n\n#####################################################################\n# Preprocess\nfrom nilearn.input_data import NiftiMasker\n\n# This is fmri timeseries data: the background has not been removed yet,\n# thus we need to use mask_strategy='epi' to compute the mask from the\n# EPI images\nmasker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,\n mask_strategy='epi', standardize=True)\ndata_masked = masker.fit_transform(func_filename)\n\n# Concatenate all the subjects\n# fmri_data = np.concatenate(data_masked, axis=1)\nfmri_data = data_masked\n\n\n#####################################################################\n# Apply ICA\n\nfrom sklearn.decomposition import FastICA\nn_components = 10\nica = FastICA(n_components=n_components, random_state=42)\ncomponents_masked = ica.fit_transform(data_masked.T).T\n\n# Normalize estimated components, for thresholding to make sense\ncomponents_masked -= components_masked.mean(axis=0)\ncomponents_masked /= components_masked.std(axis=0)\n# Threshold\nimport numpy as np\ncomponents_masked[np.abs(components_masked) < .8] = 0\n\n# Now invert the masking operation, going back to a full 3D\n# representation\ncomponent_img = masker.inverse_transform(components_masked)\n\n#####################################################################\n# Visualize the results\n\n# Show some interesting components\nfrom nilearn import image\nfrom nilearn.plotting import plot_stat_map, show\n\n# Use the mean as a background\nmean_img = image.mean_img(func_filename)\n\nplot_stat_map(image.index_img(component_img, 0), mean_img)\n\nplot_stat_map(image.index_img(component_img, 1), mean_img)\n\nshow()\n", "path": "examples/05_advanced/plot_ica_resting_state.py"}], "after_files": [{"content": "\"\"\"\nMultivariate decompositions: Independent component analysis of fMRI\n===================================================================\n\n\nThis example is meant to demonstrate nilearn as a low-level tools used to\ncombine feature extraction with a multivariate decomposition algorithm\nfor movie-watching.\n\nThis example is a toy. 
To apply ICA to fmri timeseries data, it is advised\nto look at the example\n:ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_decomposition.py`.\n\nThe example here applies the scikit-learn ICA to movie watching timeseries data.\nNote that following the code in the example, any unsupervised\ndecomposition model, or other latent-factor models, can be applied to\nthe data, as the scikit-learn API enables to exchange them as almost\nblack box (though the relevant parameter for brain maps might no longer\nbe given by a call to fit_transform).\n\n\"\"\"\n\n#####################################################################\n# Load movie watching dataset\nfrom nilearn import datasets\n# Here we use only single subject to get faster-running code.\ndataset = datasets.fetch_development_fmri(n_subjects=1)\nfunc_filename = dataset.func[0]\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n dataset.func[0]) # 4D data\n\n\n#####################################################################\n# Preprocess\nfrom nilearn.input_data import NiftiMasker\n\n# This is fmri timeseries data: the background has not been removed yet,\n# thus we need to use mask_strategy='epi' to compute the mask from the\n# EPI images\nmasker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,\n mask_strategy='epi', standardize=True)\ndata_masked = masker.fit_transform(func_filename)\n\n\n#####################################################################\n# Apply ICA\n\nfrom sklearn.decomposition import FastICA\nn_components = 10\nica = FastICA(n_components=n_components, random_state=42)\ncomponents_masked = ica.fit_transform(data_masked.T).T\n\n# Normalize estimated components, for thresholding to make sense\ncomponents_masked -= components_masked.mean(axis=0)\ncomponents_masked /= components_masked.std(axis=0)\n# Threshold\nimport numpy as np\ncomponents_masked[np.abs(components_masked) < .8] = 0\n\n# Now invert the masking operation, going back to a full 3D\n# representation\ncomponent_img = masker.inverse_transform(components_masked)\n\n#####################################################################\n# Visualize the results\n\n# Show some interesting components\nfrom nilearn import image\nfrom nilearn.plotting import plot_stat_map, show\n\n# Use the mean as a background\nmean_img = image.mean_img(func_filename)\n\nplot_stat_map(image.index_img(component_img, 0), mean_img)\n\nplot_stat_map(image.index_img(component_img, 1), mean_img)\n\nshow()\n", "path": "examples/05_advanced/plot_ica_resting_state.py"}]} | 1,197 | 248 |
gh_patches_debug_26036 | rasdani/github-patches | git_diff | python-discord__bot-1293 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter functionality for emoji-spam messages
Now that someone's figured out you can cause havoc by dumping massive amounts of emojis into a channel, we need a filter to automatically manage this ASAP. Detection should be fairly simple: a very high emoji count is required for the effect (>20/message to >250 for varying impact), but emoji variance is not required.
Tangentially: this is also likely related to a recent Discord update that has slowed the emoji loading process.
I think we can safely filter out and autodelete high emoji messages without significant false positives.
We may want to handle both unicode emojis and server emojis.
It's possible for unicode emojis to get a higher "load" because they count as one character for message transmission purposes, but server emojis count for more. However, server emojis may be animated, and will add significant load for that reason as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/rules/discord_emojis.py`
Content:
```
1 import re
2 from typing import Dict, Iterable, List, Optional, Tuple
3
4 from discord import Member, Message
5
6
7 DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>")
8 CODE_BLOCK_RE = re.compile(r"```.*?```", flags=re.DOTALL)
9
10
11 async def apply(
12 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
13 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
14 """Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user."""
15 relevant_messages = tuple(
16 msg
17 for msg in recent_messages
18 if msg.author == last_message.author
19 )
20
21 # Get rid of code blocks in the message before searching for emojis.
22 total_emojis = sum(
23 len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub("", msg.content)))
24 for msg in relevant_messages
25 )
26
27 if total_emojis > config['max']:
28 return (
29 f"sent {total_emojis} emojis in {config['interval']}s",
30 (last_message.author,),
31 relevant_messages
32 )
33 return None
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/rules/discord_emojis.py b/bot/rules/discord_emojis.py
--- a/bot/rules/discord_emojis.py
+++ b/bot/rules/discord_emojis.py
@@ -2,16 +2,17 @@
from typing import Dict, Iterable, List, Optional, Tuple
from discord import Member, Message
+from emoji import demojize
-DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>")
+DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>|:\w+:")
CODE_BLOCK_RE = re.compile(r"```.*?```", flags=re.DOTALL)
async def apply(
last_message: Message, recent_messages: List[Message], config: Dict[str, int]
) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
- """Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user."""
+ """Detects total Discord emojis exceeding the limit sent by a single user."""
relevant_messages = tuple(
msg
for msg in recent_messages
@@ -19,8 +20,9 @@
)
# Get rid of code blocks in the message before searching for emojis.
+ # Convert Unicode emojis to :emoji: format to get their count.
total_emojis = sum(
- len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub("", msg.content)))
+ len(DISCORD_EMOJI_RE.findall(demojize(CODE_BLOCK_RE.sub("", msg.content))))
for msg in relevant_messages
)
| {"golden_diff": "diff --git a/bot/rules/discord_emojis.py b/bot/rules/discord_emojis.py\n--- a/bot/rules/discord_emojis.py\n+++ b/bot/rules/discord_emojis.py\n@@ -2,16 +2,17 @@\n from typing import Dict, Iterable, List, Optional, Tuple\n \n from discord import Member, Message\n+from emoji import demojize\n \n \n-DISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>\")\n+DISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>|:\\w+:\")\n CODE_BLOCK_RE = re.compile(r\"```.*?```\", flags=re.DOTALL)\n \n \n async def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n- \"\"\"Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user.\"\"\"\n+ \"\"\"Detects total Discord emojis exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n@@ -19,8 +20,9 @@\n )\n \n # Get rid of code blocks in the message before searching for emojis.\n+ # Convert Unicode emojis to :emoji: format to get their count.\n total_emojis = sum(\n- len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub(\"\", msg.content)))\n+ len(DISCORD_EMOJI_RE.findall(demojize(CODE_BLOCK_RE.sub(\"\", msg.content))))\n for msg in relevant_messages\n )\n", "issue": "Filter functionality for emoji-spam messages\nNow that someone's figured out you can cause havok by dumping massive amounts of emojis into a channel, we need a filter to automatically manage this ASAP. Detection should be fairly simple, a very high emoji count is required for the effect (>20/message to >250 for varying impact) but emoji variance is not required. \r\n\r\nTangentially: This is also likely related to a discord update that went out that has slowed the emoji loading process recently.\r\n\r\nI think we can safely filter out and autodelete high emoji messages without significant false positives. \r\n\r\nWe may want to handle both unicode emojis and server emojis. \r\n\r\nIt's possible for unicode emojis to get a higher \"load\" because they could as one character for message transmission purposes but server emojis count for more. 
However, server emojis may be animated, and will apply significant load by that fact as well.\r\n\r\n\n", "before_files": [{"content": "import re\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nDISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>\")\nCODE_BLOCK_RE = re.compile(r\"```.*?```\", flags=re.DOTALL)\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if msg.author == last_message.author\n )\n\n # Get rid of code blocks in the message before searching for emojis.\n total_emojis = sum(\n len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub(\"\", msg.content)))\n for msg in relevant_messages\n )\n\n if total_emojis > config['max']:\n return (\n f\"sent {total_emojis} emojis in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/discord_emojis.py"}], "after_files": [{"content": "import re\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\nfrom emoji import demojize\n\n\nDISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>|:\\w+:\")\nCODE_BLOCK_RE = re.compile(r\"```.*?```\", flags=re.DOTALL)\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total Discord emojis exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if msg.author == last_message.author\n )\n\n # Get rid of code blocks in the message before searching for emojis.\n # Convert Unicode emojis to :emoji: format to get their count.\n total_emojis = sum(\n len(DISCORD_EMOJI_RE.findall(demojize(CODE_BLOCK_RE.sub(\"\", msg.content))))\n for msg in relevant_messages\n )\n\n if total_emojis > config['max']:\n return (\n f\"sent {total_emojis} emojis in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/discord_emojis.py"}]} | 760 | 351 |
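The patched rule above reduces to one idea: convert Unicode emojis to their `:name:` aliases so a single regular expression counts both custom Discord emojis and Unicode ones. A self-contained sketch of that counting step, using the same third-party `emoji` package the fix imports (the helper function and the example values are mine):

```python
import re

from emoji import demojize

# Custom Discord emojis look like <:name:id>; demojize() rewrites Unicode
# emojis into the :name: form, so one pattern covers both kinds.
DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>|:\w+:")

# Mirrors the rule's code-block stripping; built from pieces to avoid a
# literal run of backticks here.
FENCE = "`" * 3
CODE_BLOCK_RE = re.compile(FENCE + r".*?" + FENCE, flags=re.DOTALL)


def count_emojis(content: str) -> int:
    """Count Discord and Unicode emojis in a message, ignoring code blocks."""
    without_code = CODE_BLOCK_RE.sub("", content)
    return len(DISCORD_EMOJI_RE.findall(demojize(without_code)))


# count_emojis("hello <:python:123456789> " + "\U0001F600") returns 2
```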
gh_patches_debug_38076 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2208 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Event Planning Data Set
## Problem
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
We don't have an example data set for our live demo that shows how one can use Mathesar to schedule and plan events in time.
## Proposed solution
<!-- A clear and concise description of your proposed solution or feature. -->
We'd like to simulate a data set representing a conference (maybe the "Mathesar DevCon" or the like). The conference should take place over a weekend (Friday, Saturday, Sunday). The conference should have speakers who give talks in rooms in some fashion. We'd also like to have 'tracks' describing sequentially scheduled talks (perhaps in different rooms) on some topic.
Example explorations: "First day speakers", "Room A schedule", "everyone talking during time B"
Tables (tentative):
- Speakers
- id
- Last Name
- First Name
- Bio
- Rooms
- id
- Room Number
- Capacity
- Topics
- id
- Name
- Tracks
- id
- Name
- Days (just convenience)
- id
- Name (First Day, Second Day, Third Day)
- Date (actual Date)
- Talks
- id
- Date (fkey)
- Time
- Room (fkey)
- Title
- Speaker (fkey)
- Topic (fkey)
- Track (fkey)
- Attendees
- id
- Last Name
- First Name
- Email
If we get this far, we could consider expanding. Also, this table setup should be considered rough, and the implementer is welcome to change/improve it. The goals are explorations like those mentioned above.
## Additional context
<!-- Add any other context or screenshots about the feature request here.-->
See the [Weekly meeting notes](https://wiki.mathesar.org/en/meeting-notes/2022-11/2022-11-17-weekly) where we discussed example data.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `demo/install.py`
Content:
```
1 import bz2
2 import os
3
4 from sqlalchemy import text
5 from sqlalchemy.exc import OperationalError
6
7 from db.engine import create_future_engine
8
9 from demo.arxiv_skeleton import setup_and_register_schema_for_receiving_arxiv_data
10 from mathesar.models.base import Table, Schema, PreviewColumnSettings
11
12 FILE_DIR = os.path.abspath(os.path.dirname(__file__))
13 RESOURCES = os.path.join(FILE_DIR, "resources")
14 LIBRARY_ONE = os.path.join(RESOURCES, "library_without_checkouts.sql")
15 LIBRARY_TWO = os.path.join(RESOURCES, "library_add_checkouts.sql")
16 LIBRARY_MANAGEMENT = 'Library Management'
17 MOVIE_COLLECTION = 'Movie Collection'
18 ARXIV = 'Latest Papers from arXiv'
19 MOVIES_SQL_BZ2 = os.path.join(RESOURCES, "movie_collection.sql.bz2")
20
21
22 def load_datasets(engine):
23 """Load some SQL files with demo data to DB targeted by `engine`."""
24 _load_library_dataset(engine)
25 _load_movies_dataset(engine)
26 _load_arxiv_data_skeleton(engine)
27
28
29 def _load_library_dataset(engine):
30 """
31 Load the library dataset into a "Library Management" schema.
32
33 Uses given engine to define database to load into.
34 Destructive, and will knock out any previous "Library Management"
35 schema in the given database.
36 """
37 drop_schema_query = text(f"""DROP SCHEMA IF EXISTS "{LIBRARY_MANAGEMENT}";""")
38 create_schema_query = text(f"""CREATE SCHEMA "{LIBRARY_MANAGEMENT}";""")
39 set_search_path = text(f"""SET search_path="{LIBRARY_MANAGEMENT}";""")
40 with engine.begin() as conn, open(LIBRARY_ONE) as f1, open(LIBRARY_TWO) as f2:
41 conn.execute(drop_schema_query)
42 conn.execute(create_schema_query)
43 conn.execute(set_search_path)
44 conn.execute(text(f1.read()))
45 conn.execute(text(f2.read()))
46
47
48 def _load_movies_dataset(engine):
49 drop_schema_query = text(f"""DROP SCHEMA IF EXISTS "{MOVIE_COLLECTION}" CASCADE;""")
50 create_schema_query = text(f"""CREATE SCHEMA "{MOVIE_COLLECTION}";""")
51 set_search_path = text(f"""SET search_path="{MOVIE_COLLECTION}";""")
52 with engine.begin() as conn, bz2.open(MOVIES_SQL_BZ2, 'rt') as f:
53 conn.execute(drop_schema_query)
54 conn.execute(create_schema_query)
55 conn.execute(set_search_path)
56 conn.execute(text(f.read()))
57
58
59 def _load_arxiv_data_skeleton(engine):
60 setup_and_register_schema_for_receiving_arxiv_data(engine, schema_name=ARXIV)
61
62
63 def customize_settings(engine):
64 """Set preview settings so demo data looks good."""
65 _customize_library_preview_settings(engine)
66
67
68 def _customize_library_preview_settings(engine):
69 schema = _get_dj_schema_by_name(engine, LIBRARY_MANAGEMENT)
70 authors = _get_dj_table_by_name(schema, 'Authors')
71 _set_first_and_last_names_preview(authors)
72 patrons = _get_dj_table_by_name(schema, 'Patrons')
73 _set_first_and_last_names_preview(patrons)
74
75
76 def _set_first_and_last_names_preview(table):
77 first_name = _get_dj_column_by_name(table, 'First Name')
78 last_name = _get_dj_column_by_name(table, 'Last Name')
79 template = f'{{{first_name.id}}} {{{last_name.id}}}'
80 new_preview_settings = PreviewColumnSettings.objects.create(
81 customized=True, template=template
82 )
83 table.settings.preview_settings = new_preview_settings
84 table.settings.save()
85
86
87 def _get_dj_schema_by_name(engine, name):
88 db_name = engine.url.database
89 schemas = Schema.objects.filter(database__name=db_name)
90 for s in schemas:
91 if s.name == name:
92 return s
93
94
95 def _get_dj_table_by_name(schema, name):
96 tables = Table.objects.filter(schema=schema)
97 for t in tables:
98 if t.name == name:
99 return t
100
101
102 def _get_dj_column_by_name(table, name):
103 columns = table.columns.all()
104 for c in columns:
105 if c.name == name:
106 return c
107
108
109 def create_demo_database(
110 user_db, username, password, hostname, root_db, port, template_db
111 ):
112 """Create database, install Mathesar on it, add demo data."""
113 user_db_engine = create_future_engine(
114 username, password, hostname, user_db, port
115 )
116 try:
117 user_db_engine.connect()
118 user_db_engine.dispose()
119 print(f"Database {user_db} already exists! Skipping...")
120 except OperationalError:
121 root_db_engine = create_future_engine(
122 username, password, hostname, root_db, port,
123 )
124 with root_db_engine.connect() as conn:
125 conn.execution_options(isolation_level="AUTOCOMMIT")
126 conn.execute(text(f"CREATE DATABASE {user_db} TEMPLATE {template_db};"))
127 root_db_engine.dispose()
128 user_db_engine.dispose()
129 print(f"Created DB is {user_db}.")
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/demo/install.py b/demo/install.py
--- a/demo/install.py
+++ b/demo/install.py
@@ -13,7 +13,11 @@
RESOURCES = os.path.join(FILE_DIR, "resources")
LIBRARY_ONE = os.path.join(RESOURCES, "library_without_checkouts.sql")
LIBRARY_TWO = os.path.join(RESOURCES, "library_add_checkouts.sql")
+DEVCON_DATASET = os.path.join(RESOURCES, "devcon_dataset.sql")
+MOVIES_SQL_BZ2 = os.path.join(RESOURCES, "movie_collection.sql.bz2")
+
LIBRARY_MANAGEMENT = 'Library Management'
+MATHESAR_CON = 'Mathesar Con'
MOVIE_COLLECTION = 'Movie Collection'
ARXIV = 'Latest Papers from arXiv'
MOVIES_SQL_BZ2 = os.path.join(RESOURCES, "movie_collection.sql.bz2")
@@ -23,6 +27,7 @@
"""Load some SQL files with demo data to DB targeted by `engine`."""
_load_library_dataset(engine)
_load_movies_dataset(engine)
+ _load_devcon_dataset(engine)
_load_arxiv_data_skeleton(engine)
@@ -56,6 +61,17 @@
conn.execute(text(f.read()))
+def _load_devcon_dataset(engine):
+ drop_schema_query = text(f"""DROP SCHEMA IF EXISTS "{MATHESAR_CON}" CASCADE;""")
+ create_schema_query = text(f"""CREATE SCHEMA "{MATHESAR_CON}";""")
+ set_search_path = text(f"""SET search_path="{MATHESAR_CON}";""")
+ with engine.begin() as conn, open(DEVCON_DATASET) as f:
+ conn.execute(drop_schema_query)
+ conn.execute(create_schema_query)
+ conn.execute(set_search_path)
+ conn.execute(text(f.read()))
+
+
def _load_arxiv_data_skeleton(engine):
setup_and_register_schema_for_receiving_arxiv_data(engine, schema_name=ARXIV)
@@ -63,6 +79,7 @@
def customize_settings(engine):
"""Set preview settings so demo data looks good."""
_customize_library_preview_settings(engine)
+ _customize_devcon_preview_settings(engine)
def _customize_library_preview_settings(engine):
@@ -73,6 +90,12 @@
_set_first_and_last_names_preview(patrons)
+def _customize_devcon_preview_settings(engine):
+ schema = _get_dj_schema_by_name(engine, MATHESAR_CON)
+ presenters = _get_dj_table_by_name(schema, 'Presenters')
+ _set_first_and_last_names_preview(presenters)
+
+
def _set_first_and_last_names_preview(table):
first_name = _get_dj_column_by_name(table, 'First Name')
last_name = _get_dj_column_by_name(table, 'Last Name')
| {"golden_diff": "diff --git a/demo/install.py b/demo/install.py\n--- a/demo/install.py\n+++ b/demo/install.py\n@@ -13,7 +13,11 @@\n RESOURCES = os.path.join(FILE_DIR, \"resources\")\n LIBRARY_ONE = os.path.join(RESOURCES, \"library_without_checkouts.sql\")\n LIBRARY_TWO = os.path.join(RESOURCES, \"library_add_checkouts.sql\")\n+DEVCON_DATASET = os.path.join(RESOURCES, \"devcon_dataset.sql\")\n+MOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n+\n LIBRARY_MANAGEMENT = 'Library Management'\n+MATHESAR_CON = 'Mathesar Con'\n MOVIE_COLLECTION = 'Movie Collection'\n ARXIV = 'Latest Papers from arXiv'\n MOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n@@ -23,6 +27,7 @@\n \"\"\"Load some SQL files with demo data to DB targeted by `engine`.\"\"\"\n _load_library_dataset(engine)\n _load_movies_dataset(engine)\n+ _load_devcon_dataset(engine)\n _load_arxiv_data_skeleton(engine)\n \n \n@@ -56,6 +61,17 @@\n conn.execute(text(f.read()))\n \n \n+def _load_devcon_dataset(engine):\n+ drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{MATHESAR_CON}\" CASCADE;\"\"\")\n+ create_schema_query = text(f\"\"\"CREATE SCHEMA \"{MATHESAR_CON}\";\"\"\")\n+ set_search_path = text(f\"\"\"SET search_path=\"{MATHESAR_CON}\";\"\"\")\n+ with engine.begin() as conn, open(DEVCON_DATASET) as f:\n+ conn.execute(drop_schema_query)\n+ conn.execute(create_schema_query)\n+ conn.execute(set_search_path)\n+ conn.execute(text(f.read()))\n+\n+\n def _load_arxiv_data_skeleton(engine):\n setup_and_register_schema_for_receiving_arxiv_data(engine, schema_name=ARXIV)\n \n@@ -63,6 +79,7 @@\n def customize_settings(engine):\n \"\"\"Set preview settings so demo data looks good.\"\"\"\n _customize_library_preview_settings(engine)\n+ _customize_devcon_preview_settings(engine)\n \n \n def _customize_library_preview_settings(engine):\n@@ -73,6 +90,12 @@\n _set_first_and_last_names_preview(patrons)\n \n \n+def _customize_devcon_preview_settings(engine):\n+ schema = _get_dj_schema_by_name(engine, MATHESAR_CON)\n+ presenters = _get_dj_table_by_name(schema, 'Presenters')\n+ _set_first_and_last_names_preview(presenters)\n+\n+\n def _set_first_and_last_names_preview(table):\n first_name = _get_dj_column_by_name(table, 'First Name')\n last_name = _get_dj_column_by_name(table, 'Last Name')\n", "issue": "Event Planning Data Set\n## Problem\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\n\r\nWe don't have an example data set for our live demo that shows how one can use Mathesar to schedule and plan in time.\r\n\r\n## Proposed solution\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\n\r\nWe'd like to simulate a data set representing a conference (maybe the \"Mathesar DevCon\" or the like). The conference should take place over a weekend (Friday, Saturday, Sunday). The conference should have speakers who give talks in rooms in some fashion. 
We'd also like to have 'tracks' describing sequentially scheduled talks (perhaps in different rooms) on some topic.\r\n\r\nExample explorations: \"First day speakers\", \"Room A schedule\", \"everyone talking during time B\"\r\n\r\nTables (tentative):\r\n- Speakers\r\n - id\r\n - Last Name\r\n - First Name\r\n - Bio\r\n- Rooms\r\n - id\r\n - Room Number\r\n - Capacity\r\n- Topics\r\n - id\r\n - Name\r\n- Tracks\r\n - id\r\n - Name\r\n- Days (just convenience)\r\n - id\r\n - Name (First Day, Second Day, Third Day)\r\n - Date (actual Date)\r\n- Talks\r\n - id\r\n - Date (fkey)\r\n - Time\r\n - Room (fkey)\r\n - Title\r\n - Speaker (fkey)\r\n - Topic (fkey)\r\n - Track (fkey)\r\n- Attendees\r\n - id\r\n - Last Name\r\n - First Name\r\n - Email\r\n\r\nIf we get this far, we could consider expanding. Also, this table setup should be considered rough, and the implementer is welcome to change/improve it. The goals are explorations like those mentioned above.\r\n\r\n## Additional context\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n\r\nSee the [Weekly meeting notes](https://wiki.mathesar.org/en/meeting-notes/2022-11/2022-11-17-weekly) where we discussed example data.\n", "before_files": [{"content": "import bz2\nimport os\n\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db.engine import create_future_engine\n\nfrom demo.arxiv_skeleton import setup_and_register_schema_for_receiving_arxiv_data\nfrom mathesar.models.base import Table, Schema, PreviewColumnSettings\n\nFILE_DIR = os.path.abspath(os.path.dirname(__file__))\nRESOURCES = os.path.join(FILE_DIR, \"resources\")\nLIBRARY_ONE = os.path.join(RESOURCES, \"library_without_checkouts.sql\")\nLIBRARY_TWO = os.path.join(RESOURCES, \"library_add_checkouts.sql\")\nLIBRARY_MANAGEMENT = 'Library Management'\nMOVIE_COLLECTION = 'Movie Collection'\nARXIV = 'Latest Papers from arXiv'\nMOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n\n\ndef load_datasets(engine):\n \"\"\"Load some SQL files with demo data to DB targeted by `engine`.\"\"\"\n _load_library_dataset(engine)\n _load_movies_dataset(engine)\n _load_arxiv_data_skeleton(engine)\n\n\ndef _load_library_dataset(engine):\n \"\"\"\n Load the library dataset into a \"Library Management\" schema.\n\n Uses given engine to define database to load into.\n Destructive, and will knock out any previous \"Library Management\"\n schema in the given database.\n \"\"\"\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{LIBRARY_MANAGEMENT}\";\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{LIBRARY_MANAGEMENT}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{LIBRARY_MANAGEMENT}\";\"\"\")\n with engine.begin() as conn, open(LIBRARY_ONE) as f1, open(LIBRARY_TWO) as f2:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f1.read()))\n conn.execute(text(f2.read()))\n\n\ndef _load_movies_dataset(engine):\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{MOVIE_COLLECTION}\" CASCADE;\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{MOVIE_COLLECTION}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{MOVIE_COLLECTION}\";\"\"\")\n with engine.begin() as conn, bz2.open(MOVIES_SQL_BZ2, 'rt') as f:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f.read()))\n\n\ndef _load_arxiv_data_skeleton(engine):\n 
setup_and_register_schema_for_receiving_arxiv_data(engine, schema_name=ARXIV)\n\n\ndef customize_settings(engine):\n \"\"\"Set preview settings so demo data looks good.\"\"\"\n _customize_library_preview_settings(engine)\n\n\ndef _customize_library_preview_settings(engine):\n schema = _get_dj_schema_by_name(engine, LIBRARY_MANAGEMENT)\n authors = _get_dj_table_by_name(schema, 'Authors')\n _set_first_and_last_names_preview(authors)\n patrons = _get_dj_table_by_name(schema, 'Patrons')\n _set_first_and_last_names_preview(patrons)\n\n\ndef _set_first_and_last_names_preview(table):\n first_name = _get_dj_column_by_name(table, 'First Name')\n last_name = _get_dj_column_by_name(table, 'Last Name')\n template = f'{{{first_name.id}}} {{{last_name.id}}}'\n new_preview_settings = PreviewColumnSettings.objects.create(\n customized=True, template=template\n )\n table.settings.preview_settings = new_preview_settings\n table.settings.save()\n\n\ndef _get_dj_schema_by_name(engine, name):\n db_name = engine.url.database\n schemas = Schema.objects.filter(database__name=db_name)\n for s in schemas:\n if s.name == name:\n return s\n\n\ndef _get_dj_table_by_name(schema, name):\n tables = Table.objects.filter(schema=schema)\n for t in tables:\n if t.name == name:\n return t\n\n\ndef _get_dj_column_by_name(table, name):\n columns = table.columns.all()\n for c in columns:\n if c.name == name:\n return c\n\n\ndef create_demo_database(\n user_db, username, password, hostname, root_db, port, template_db\n):\n \"\"\"Create database, install Mathesar on it, add demo data.\"\"\"\n user_db_engine = create_future_engine(\n username, password, hostname, user_db, port\n )\n try:\n user_db_engine.connect()\n user_db_engine.dispose()\n print(f\"Database {user_db} already exists! Skipping...\")\n except OperationalError:\n root_db_engine = create_future_engine(\n username, password, hostname, root_db, port,\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {user_db} TEMPLATE {template_db};\"))\n root_db_engine.dispose()\n user_db_engine.dispose()\n print(f\"Created DB is {user_db}.\")\n", "path": "demo/install.py"}], "after_files": [{"content": "import bz2\nimport os\n\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db.engine import create_future_engine\n\nfrom demo.arxiv_skeleton import setup_and_register_schema_for_receiving_arxiv_data\nfrom mathesar.models.base import Table, Schema, PreviewColumnSettings\n\nFILE_DIR = os.path.abspath(os.path.dirname(__file__))\nRESOURCES = os.path.join(FILE_DIR, \"resources\")\nLIBRARY_ONE = os.path.join(RESOURCES, \"library_without_checkouts.sql\")\nLIBRARY_TWO = os.path.join(RESOURCES, \"library_add_checkouts.sql\")\nDEVCON_DATASET = os.path.join(RESOURCES, \"devcon_dataset.sql\")\nMOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n\nLIBRARY_MANAGEMENT = 'Library Management'\nMATHESAR_CON = 'Mathesar Con'\nMOVIE_COLLECTION = 'Movie Collection'\nARXIV = 'Latest Papers from arXiv'\nMOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n\n\ndef load_datasets(engine):\n \"\"\"Load some SQL files with demo data to DB targeted by `engine`.\"\"\"\n _load_library_dataset(engine)\n _load_movies_dataset(engine)\n _load_devcon_dataset(engine)\n _load_arxiv_data_skeleton(engine)\n\n\ndef _load_library_dataset(engine):\n \"\"\"\n Load the library dataset into a \"Library Management\" schema.\n\n Uses given engine to define database to load 
into.\n Destructive, and will knock out any previous \"Library Management\"\n schema in the given database.\n \"\"\"\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{LIBRARY_MANAGEMENT}\";\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{LIBRARY_MANAGEMENT}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{LIBRARY_MANAGEMENT}\";\"\"\")\n with engine.begin() as conn, open(LIBRARY_ONE) as f1, open(LIBRARY_TWO) as f2:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f1.read()))\n conn.execute(text(f2.read()))\n\n\ndef _load_movies_dataset(engine):\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{MOVIE_COLLECTION}\" CASCADE;\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{MOVIE_COLLECTION}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{MOVIE_COLLECTION}\";\"\"\")\n with engine.begin() as conn, bz2.open(MOVIES_SQL_BZ2, 'rt') as f:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f.read()))\n\n\ndef _load_devcon_dataset(engine):\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{MATHESAR_CON}\" CASCADE;\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{MATHESAR_CON}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{MATHESAR_CON}\";\"\"\")\n with engine.begin() as conn, open(DEVCON_DATASET) as f:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f.read()))\n\n\ndef _load_arxiv_data_skeleton(engine):\n setup_and_register_schema_for_receiving_arxiv_data(engine, schema_name=ARXIV)\n\n\ndef customize_settings(engine):\n \"\"\"Set preview settings so demo data looks good.\"\"\"\n _customize_library_preview_settings(engine)\n _customize_devcon_preview_settings(engine)\n\n\ndef _customize_library_preview_settings(engine):\n schema = _get_dj_schema_by_name(engine, LIBRARY_MANAGEMENT)\n authors = _get_dj_table_by_name(schema, 'Authors')\n _set_first_and_last_names_preview(authors)\n patrons = _get_dj_table_by_name(schema, 'Patrons')\n _set_first_and_last_names_preview(patrons)\n\n\ndef _customize_devcon_preview_settings(engine):\n schema = _get_dj_schema_by_name(engine, MATHESAR_CON)\n presenters = _get_dj_table_by_name(schema, 'Presenters')\n _set_first_and_last_names_preview(presenters)\n\n\ndef _set_first_and_last_names_preview(table):\n first_name = _get_dj_column_by_name(table, 'First Name')\n last_name = _get_dj_column_by_name(table, 'Last Name')\n template = f'{{{first_name.id}}} {{{last_name.id}}}'\n new_preview_settings = PreviewColumnSettings.objects.create(\n customized=True, template=template\n )\n table.settings.preview_settings = new_preview_settings\n table.settings.save()\n\n\ndef _get_dj_schema_by_name(engine, name):\n db_name = engine.url.database\n schemas = Schema.objects.filter(database__name=db_name)\n for s in schemas:\n if s.name == name:\n return s\n\n\ndef _get_dj_table_by_name(schema, name):\n tables = Table.objects.filter(schema=schema)\n for t in tables:\n if t.name == name:\n return t\n\n\ndef _get_dj_column_by_name(table, name):\n columns = table.columns.all()\n for c in columns:\n if c.name == name:\n return c\n\n\ndef create_demo_database(\n user_db, username, password, hostname, root_db, port, template_db\n):\n \"\"\"Create database, install Mathesar on it, add demo data.\"\"\"\n user_db_engine = create_future_engine(\n username, password, hostname, user_db, 
port\n )\n try:\n user_db_engine.connect()\n user_db_engine.dispose()\n print(f\"Database {user_db} already exists! Skipping...\")\n except OperationalError:\n root_db_engine = create_future_engine(\n username, password, hostname, root_db, port,\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {user_db} TEMPLATE {template_db};\"))\n root_db_engine.dispose()\n user_db_engine.dispose()\n print(f\"Created DB is {user_db}.\")\n", "path": "demo/install.py"}]} | 2,058 | 618 |
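The three demo loaders in the Mathesar patch above all follow one shape: drop the target schema, recreate it, point `search_path` at it, and replay a SQL dump inside a single transaction. A minimal generic sketch of that pattern with SQLAlchemy — the schema and file names here are placeholders, not part of the demo data:

```python
from sqlalchemy import text

def load_sql_dump(engine, schema_name, sql_path):
    # Same drop/create/replay sequence used by _load_library_dataset,
    # _load_movies_dataset and the new _load_devcon_dataset.
    drop_schema = text(f'DROP SCHEMA IF EXISTS "{schema_name}" CASCADE;')
    create_schema = text(f'CREATE SCHEMA "{schema_name}";')
    set_search_path = text(f'SET search_path="{schema_name}";')
    with engine.begin() as conn, open(sql_path) as f:
        conn.execute(drop_schema)
        conn.execute(create_schema)
        conn.execute(set_search_path)
        conn.execute(text(f.read()))
```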
gh_patches_debug_51560 | rasdani/github-patches | git_diff | ray-project__ray-10593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the multi-worker feature for Java worker experimental
Right now, the multi-worker feature for Java worker is enabled by default, but the `ActorHandle::kill()` API doesn't work well if multi-worker is enabled because it will kill the whole process instead of one worker in the process.
To avoid complaints from Java users, we should disable the multi-worker feature by default, but we still enable it in unit test.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/job_config.py`
Content:
```
1 import ray
2
3
4 class JobConfig:
5 """A class used to store the configurations of a job.
6
7 Attributes:
8 worker_env (dict): Environment variables to be set on worker
9 processes.
10 num_java_workers_per_process (int): The number of java workers per
11 worker process.
12 jvm_options (str[]): The jvm options for java workers of the job.
13 """
14
15 def __init__(
16 self,
17 worker_env=None,
18 num_java_workers_per_process=10,
19 jvm_options=None,
20 ):
21 if worker_env is None:
22 self.worker_env = dict()
23 else:
24 self.worker_env = worker_env
25 self.num_java_workers_per_process = num_java_workers_per_process
26 if jvm_options is None:
27 self.jvm_options = []
28 else:
29 self.jvm_options = jvm_options
30
31 def serialize(self):
32 job_config = ray.gcs_utils.JobConfig()
33 for key in self.worker_env:
34 job_config.worker_env[key] = self.worker_env[key]
35 job_config.num_java_workers_per_process = (
36 self.num_java_workers_per_process)
37 job_config.jvm_options.extend(self.jvm_options)
38 return job_config.SerializeToString()
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/job_config.py b/python/ray/job_config.py
--- a/python/ray/job_config.py
+++ b/python/ray/job_config.py
@@ -15,7 +15,7 @@
def __init__(
self,
worker_env=None,
- num_java_workers_per_process=10,
+ num_java_workers_per_process=1,
jvm_options=None,
):
if worker_env is None:
| {"golden_diff": "diff --git a/python/ray/job_config.py b/python/ray/job_config.py\n--- a/python/ray/job_config.py\n+++ b/python/ray/job_config.py\n@@ -15,7 +15,7 @@\n def __init__(\n self,\n worker_env=None,\n- num_java_workers_per_process=10,\n+ num_java_workers_per_process=1,\n jvm_options=None,\n ):\n if worker_env is None:\n", "issue": "Make the multi-worker feature for Java worker experimental\nRight now, the multi-worker feature for Java worker is enabled by default, but the `ActorHandle::kill()` API doesn't work well if multi-worker is enabled because it will kill the whole process instead of one worker in the process.\r\n\r\nTo avoid complaints from Java users, we should disable the multi-worker feature by default, but we still enable it in unit test.\n", "before_files": [{"content": "import ray\n\n\nclass JobConfig:\n \"\"\"A class used to store the configurations of a job.\n\n Attributes:\n worker_env (dict): Environment variables to be set on worker\n processes.\n num_java_workers_per_process (int): The number of java workers per\n worker process.\n jvm_options (str[]): The jvm options for java workers of the job.\n \"\"\"\n\n def __init__(\n self,\n worker_env=None,\n num_java_workers_per_process=10,\n jvm_options=None,\n ):\n if worker_env is None:\n self.worker_env = dict()\n else:\n self.worker_env = worker_env\n self.num_java_workers_per_process = num_java_workers_per_process\n if jvm_options is None:\n self.jvm_options = []\n else:\n self.jvm_options = jvm_options\n\n def serialize(self):\n job_config = ray.gcs_utils.JobConfig()\n for key in self.worker_env:\n job_config.worker_env[key] = self.worker_env[key]\n job_config.num_java_workers_per_process = (\n self.num_java_workers_per_process)\n job_config.jvm_options.extend(self.jvm_options)\n return job_config.SerializeToString()\n", "path": "python/ray/job_config.py"}], "after_files": [{"content": "import ray\n\n\nclass JobConfig:\n \"\"\"A class used to store the configurations of a job.\n\n Attributes:\n worker_env (dict): Environment variables to be set on worker\n processes.\n num_java_workers_per_process (int): The number of java workers per\n worker process.\n jvm_options (str[]): The jvm options for java workers of the job.\n \"\"\"\n\n def __init__(\n self,\n worker_env=None,\n num_java_workers_per_process=1,\n jvm_options=None,\n ):\n if worker_env is None:\n self.worker_env = dict()\n else:\n self.worker_env = worker_env\n self.num_java_workers_per_process = num_java_workers_per_process\n if jvm_options is None:\n self.jvm_options = []\n else:\n self.jvm_options = jvm_options\n\n def serialize(self):\n job_config = ray.gcs_utils.JobConfig()\n for key in self.worker_env:\n job_config.worker_env[key] = self.worker_env[key]\n job_config.num_java_workers_per_process = (\n self.num_java_workers_per_process)\n job_config.jvm_options.extend(self.jvm_options)\n return job_config.SerializeToString()\n", "path": "python/ray/job_config.py"}]} | 670 | 97 |
gh_patches_debug_28555 | rasdani/github-patches | git_diff | archlinux__archinstall-418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AMD graphics driver selection unclear
For AMD, the options are by no means obvious in what they actually do.
The recommended choice should in my opinion be mesa (packages mesa and radeon-vulkan).
If you want to go by vendor in the first table, then mesa should be moved into the secondary table, i.e. when selecting AMD you get the choice between AMDVLK and mesa. The current situation is just confusing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/hardware.py`
Content:
```
1 import os, subprocess, json
2 from .general import sys_command
3 from .networking import list_interfaces, enrichIfaceTypes
4 from typing import Optional
5
6 __packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']
7
8 AVAILABLE_GFX_DRIVERS = {
9 # Sub-dicts are layer-2 options to be selected
10 # and lists are a list of packages to be installed
11 'AMD / ATI' : {
12 'amd' : ['xf86-video-amdgpu'],
13 'ati' : ['xf86-video-ati']
14 },
15 'intel' : ['xf86-video-intel'],
16 'nvidia' : {
17 'open-source' : ['xf86-video-nouveau'],
18 'proprietary' : ['nvidia']
19 },
20 'mesa' : ['mesa'],
21 'fbdev' : ['xf86-video-fbdev'],
22 'vesa' : ['xf86-video-vesa'],
23 'vmware / virtualbox' : ['xf86-video-vmware']
24 }
25
26 def hasWifi()->bool:
27 return 'WIRELESS' in enrichIfaceTypes(list_interfaces().values()).values()
28
29 def hasAMDCPU()->bool:
30 if subprocess.check_output("lscpu | grep AMD", shell=True).strip().decode():
31 return True
32 return False
33 def hasIntelCPU()->bool:
34 if subprocess.check_output("lscpu | grep Intel", shell=True).strip().decode():
35 return True
36 return False
37
38 def hasUEFI()->bool:
39 return os.path.isdir('/sys/firmware/efi')
40
41 def graphicsDevices()->dict:
42 cards = {}
43 for line in sys_command(f"lspci"):
44 if b' VGA ' in line:
45 _, identifier = line.split(b': ',1)
46 cards[identifier.strip().lower().decode('UTF-8')] = line
47 return cards
48
49 def hasNvidiaGraphics()->bool:
50 return any('nvidia' in x for x in graphicsDevices())
51
52 def hasAmdGraphics()->bool:
53 return any('amd' in x for x in graphicsDevices())
54
55 def hasIntelGraphics()->bool:
56 return any('intel' in x for x in graphicsDevices())
57
58
59 def cpuVendor()-> Optional[str]:
60 cpu_info = json.loads(subprocess.check_output("lscpu -J", shell=True).decode('utf-8'))['lscpu']
61 for info in cpu_info:
62 if info.get('field',None):
63 if info.get('field',None) == "Vendor ID:":
64 return info.get('data',None)
65
66 def isVM() -> bool:
67 try:
68 subprocess.check_call(["systemd-detect-virt"]) # systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine
69 return True
70 except:
71 return False
72
73 # TODO: Add more identifiers
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py
--- a/archinstall/lib/hardware.py
+++ b/archinstall/lib/hardware.py
@@ -3,24 +3,53 @@
from .networking import list_interfaces, enrichIfaceTypes
from typing import Optional
-__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']
+__packages__ = [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "xf86-video-nouveau",
+ "xf86-video-vmware",
+ "libva-mesa-driver",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-radeon",
+ "vulkan-intel",
+ "nvidia",
+]
AVAILABLE_GFX_DRIVERS = {
# Sub-dicts are layer-2 options to be selected
# and lists are a list of packages to be installed
- 'AMD / ATI' : {
- 'amd' : ['xf86-video-amdgpu'],
- 'ati' : ['xf86-video-ati']
+ "All open-source (default)": [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "xf86-video-nouveau",
+ "xf86-video-vmware",
+ "libva-mesa-driver",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-radeon",
+ "vulkan-intel",
+ ],
+ "AMD / ATI (open-source)": [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "libva-mesa-driver",
+ "vulkan-radeon",
+ ],
+ "Intel (open-source)": [
+ "mesa",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-intel",
+ ],
+ "Nvidia": {
+ "open-source": ["mesa", "xf86-video-nouveau", "libva-mesa-driver"],
+ "proprietary": ["nvidia"],
},
- 'intel' : ['xf86-video-intel'],
- 'nvidia' : {
- 'open-source' : ['xf86-video-nouveau'],
- 'proprietary' : ['nvidia']
- },
- 'mesa' : ['mesa'],
- 'fbdev' : ['xf86-video-fbdev'],
- 'vesa' : ['xf86-video-vesa'],
- 'vmware / virtualbox' : ['xf86-video-vmware']
+ "VMware / VirtualBox (open-source)": ["mesa", "xf86-video-vmware"],
}
def hasWifi()->bool:
| {"golden_diff": "diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py\n--- a/archinstall/lib/hardware.py\n+++ b/archinstall/lib/hardware.py\n@@ -3,24 +3,53 @@\n from .networking import list_interfaces, enrichIfaceTypes\n from typing import Optional\n \n-__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']\n+__packages__ = [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"xf86-video-nouveau\",\n+\t\t\"xf86-video-vmware\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-radeon\",\n+\t\t\"vulkan-intel\",\n+\t\t\"nvidia\",\n+]\n \n AVAILABLE_GFX_DRIVERS = {\n \t# Sub-dicts are layer-2 options to be selected\n \t# and lists are a list of packages to be installed\n-\t'AMD / ATI' : {\n-\t\t'amd' : ['xf86-video-amdgpu'],\n-\t\t'ati' : ['xf86-video-ati']\n+\t\"All open-source (default)\": [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"xf86-video-nouveau\",\n+\t\t\"xf86-video-vmware\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-radeon\",\n+\t\t\"vulkan-intel\",\n+\t],\n+\t\"AMD / ATI (open-source)\": [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"vulkan-radeon\",\n+\t],\n+\t\"Intel (open-source)\": [\n+\t\t\"mesa\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-intel\",\n+\t],\n+\t\"Nvidia\": {\n+\t\t\"open-source\": [\"mesa\", \"xf86-video-nouveau\", \"libva-mesa-driver\"],\n+\t\t\"proprietary\": [\"nvidia\"],\n \t},\n-\t'intel' : ['xf86-video-intel'],\n-\t'nvidia' : {\n-\t\t'open-source' : ['xf86-video-nouveau'],\n-\t\t'proprietary' : ['nvidia']\n-\t},\n-\t'mesa' : ['mesa'],\n-\t'fbdev' : ['xf86-video-fbdev'],\n-\t'vesa' : ['xf86-video-vesa'],\n-\t'vmware / virtualbox' : ['xf86-video-vmware']\n+\t\"VMware / VirtualBox (open-source)\": [\"mesa\", \"xf86-video-vmware\"],\n }\n \n def hasWifi()->bool:\n", "issue": "AMD graphics driver selection unclear\nFor AMD, the options are by no means obvious in what they actually do.\r\n\r\nThe recommended choice should in my opinion be mesa (packages mesa and radeon-vulkan).\r\nIf you want to go by vendor in the first table, then mesa should be moved into the secondary table, i.e. when selecting AMD you get the choice between AMDVLK and mesa. 
The current situation is just confusing.\n", "before_files": [{"content": "import os, subprocess, json\nfrom .general import sys_command\nfrom .networking import list_interfaces, enrichIfaceTypes\nfrom typing import Optional\n\n__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']\n\nAVAILABLE_GFX_DRIVERS = {\n\t# Sub-dicts are layer-2 options to be selected\n\t# and lists are a list of packages to be installed\n\t'AMD / ATI' : {\n\t\t'amd' : ['xf86-video-amdgpu'],\n\t\t'ati' : ['xf86-video-ati']\n\t},\n\t'intel' : ['xf86-video-intel'],\n\t'nvidia' : {\n\t\t'open-source' : ['xf86-video-nouveau'],\n\t\t'proprietary' : ['nvidia']\n\t},\n\t'mesa' : ['mesa'],\n\t'fbdev' : ['xf86-video-fbdev'],\n\t'vesa' : ['xf86-video-vesa'],\n\t'vmware / virtualbox' : ['xf86-video-vmware']\n}\n\ndef hasWifi()->bool:\n\treturn 'WIRELESS' in enrichIfaceTypes(list_interfaces().values()).values()\n\ndef hasAMDCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep AMD\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\ndef hasIntelCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep Intel\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\n\ndef hasUEFI()->bool:\n\treturn os.path.isdir('/sys/firmware/efi')\n\ndef graphicsDevices()->dict:\n\tcards = {}\n\tfor line in sys_command(f\"lspci\"):\n\t\tif b' VGA ' in line:\n\t\t\t_, identifier = line.split(b': ',1)\n\t\t\tcards[identifier.strip().lower().decode('UTF-8')] = line\n\treturn cards\n\ndef hasNvidiaGraphics()->bool:\n\treturn any('nvidia' in x for x in graphicsDevices())\n\ndef hasAmdGraphics()->bool:\n\treturn any('amd' in x for x in graphicsDevices())\n\ndef hasIntelGraphics()->bool:\n\treturn any('intel' in x for x in graphicsDevices())\n\n\ndef cpuVendor()-> Optional[str]:\n\tcpu_info = json.loads(subprocess.check_output(\"lscpu -J\", shell=True).decode('utf-8'))['lscpu']\n\tfor info in cpu_info:\n\t\tif info.get('field',None):\n\t\t\tif info.get('field',None) == \"Vendor ID:\":\n\t\t\t\treturn info.get('data',None)\n\ndef isVM() -> bool:\n\ttry:\n\t\tsubprocess.check_call([\"systemd-detect-virt\"]) # systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine\n\t\treturn True\n\texcept:\n\t\treturn False\n\n# TODO: Add more identifiers\n", "path": "archinstall/lib/hardware.py"}], "after_files": [{"content": "import os, subprocess, json\nfrom .general import sys_command\nfrom .networking import list_interfaces, enrichIfaceTypes\nfrom typing import Optional\n\n__packages__ = [\n\t\t\"mesa\",\n\t\t\"xf86-video-amdgpu\",\n\t\t\"xf86-video-ati\",\n\t\t\"xf86-video-nouveau\",\n\t\t\"xf86-video-vmware\",\n\t\t\"libva-mesa-driver\",\n\t\t\"libva-intel-driver\",\n\t\t\"intel-media-driver\",\n\t\t\"vulkan-radeon\",\n\t\t\"vulkan-intel\",\n\t\t\"nvidia\",\n]\n\nAVAILABLE_GFX_DRIVERS = {\n\t# Sub-dicts are layer-2 options to be selected\n\t# and lists are a list of packages to be installed\n\t\"All open-source (default)\": [\n\t\t\"mesa\",\n\t\t\"xf86-video-amdgpu\",\n\t\t\"xf86-video-ati\",\n\t\t\"xf86-video-nouveau\",\n\t\t\"xf86-video-vmware\",\n\t\t\"libva-mesa-driver\",\n\t\t\"libva-intel-driver\",\n\t\t\"intel-media-driver\",\n\t\t\"vulkan-radeon\",\n\t\t\"vulkan-intel\",\n\t],\n\t\"AMD / ATI (open-source)\": [\n\t\t\"mesa\",\n\t\t\"xf86-video-amdgpu\",\n\t\t\"xf86-video-ati\",\n\t\t\"libva-mesa-driver\",\n\t\t\"vulkan-radeon\",\n\t],\n\t\"Intel (open-source)\": 
[\n\t\t\"mesa\",\n\t\t\"libva-intel-driver\",\n\t\t\"intel-media-driver\",\n\t\t\"vulkan-intel\",\n\t],\n\t\"Nvidia\": {\n\t\t\"open-source\": [\"mesa\", \"xf86-video-nouveau\", \"libva-mesa-driver\"],\n\t\t\"proprietary\": [\"nvidia\"],\n\t},\n\t\"VMware / VirtualBox (open-source)\": [\"mesa\", \"xf86-video-vmware\"],\n}\n\ndef hasWifi()->bool:\n\treturn 'WIRELESS' in enrichIfaceTypes(list_interfaces().values()).values()\n\ndef hasAMDCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep AMD\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\ndef hasIntelCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep Intel\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\n\ndef hasUEFI()->bool:\n\treturn os.path.isdir('/sys/firmware/efi')\n\ndef graphicsDevices()->dict:\n\tcards = {}\n\tfor line in sys_command(f\"lspci\"):\n\t\tif b' VGA ' in line:\n\t\t\t_, identifier = line.split(b': ',1)\n\t\t\tcards[identifier.strip().lower().decode('UTF-8')] = line\n\treturn cards\n\ndef hasNvidiaGraphics()->bool:\n\treturn any('nvidia' in x for x in graphicsDevices())\n\ndef hasAmdGraphics()->bool:\n\treturn any('amd' in x for x in graphicsDevices())\n\ndef hasIntelGraphics()->bool:\n\treturn any('intel' in x for x in graphicsDevices())\n\n\ndef cpuVendor()-> Optional[str]:\n\tcpu_info = json.loads(subprocess.check_output(\"lscpu -J\", shell=True).decode('utf-8'))['lscpu']\n\tfor info in cpu_info:\n\t\tif info.get('field',None):\n\t\t\tif info.get('field',None) == \"Vendor ID:\":\n\t\t\t\treturn info.get('data',None)\n\ndef isVM() -> bool:\n\ttry:\n\t\tsubprocess.check_call([\"systemd-detect-virt\"]) # systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine\n\t\treturn True\n\texcept:\n\t\treturn False\n\n# TODO: Add more identifiers\n", "path": "archinstall/lib/hardware.py"}]} | 1,178 | 744 |
gh_patches_debug_43973 | rasdani/github-patches | git_diff | fossasia__open-event-server-4223 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Email for session submission, approval and rejection
Parent Issue #4196
Emails for following are to be done in this issue:
- [x] Email for session submission to Event Organizer
- [x] Email for session acceptance/rejection to Speaker
- [x] Email for session acceptance/rejection to Organizer
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/mail.py`
Content:
```
1 from datetime import datetime
2
3 from flask import current_app
4
5 from app import get_settings
6 from app.api.helpers.db import save_to_db
7 from app.api.helpers.log import record_activity
8 from app.api.helpers.system_mails import MAILS
9 from app.api.helpers.utilities import string_empty
10 from app.models.mail import Mail, USER_CONFIRM, NEW_SESSION
11
12
13 def send_email(to, action, subject, html):
14 """
15 Sends email and records it in DB
16 """
17 if not string_empty(to):
18 email_service = get_settings()['email_service']
19 email_from_name = get_settings()['email_from_name']
20 if email_service == 'smtp':
21 email_from = email_from_name + '<' + get_settings()['email_from'] + '>'
22 else:
23 email_from = get_settings()['email_from']
24 payload = {
25 'to': to,
26 'from': email_from,
27 'subject': subject,
28 'html': html
29 }
30
31 if not current_app.config['TESTING']:
32 if email_service == 'smtp':
33 smtp_encryption = get_settings()['smtp_encryption']
34 if smtp_encryption == 'tls':
35 smtp_encryption = 'required'
36 elif smtp_encryption == 'ssl':
37 smtp_encryption = 'ssl'
38 elif smtp_encryption == 'tls_optional':
39 smtp_encryption = 'optional'
40 else:
41 smtp_encryption = 'none'
42
43 config = {
44 'host': get_settings()['smtp_host'],
45 'username': get_settings()['smtp_username'],
46 'password': get_settings()['smtp_password'],
47 'encryption': smtp_encryption,
48 'port': get_settings()['smtp_port'],
49 }
50
51 from tasks import send_mail_via_smtp_task
52 send_mail_via_smtp_task.delay(config, payload)
53 else:
54 payload['fromname'] = email_from_name
55 key = get_settings()['sendgrid_key']
56 if not key and not current_app.config['TESTING']:
57 print('Sendgrid key not defined')
58 return
59 headers = {
60 "Authorization": ("Bearer " + key)
61 }
62 from tasks import send_email_task
63 send_email_task.delay(payload, headers)
64
65 # record_mail(to, action, subject, html)
66 mail = Mail(
67 recipient=to, action=action, subject=subject,
68 message=html, time=datetime.utcnow()
69 )
70
71 save_to_db(mail, 'Mail Recorded')
72 record_activity('mail_event', email=to, action=action, subject=subject)
73 return True
74
75
76 def send_email_confirmation(email, link):
77 """account confirmation"""
78 send_email(
79 to=email,
80 action=USER_CONFIRM,
81 subject=MAILS[USER_CONFIRM]['subject'],
82 html=MAILS[USER_CONFIRM]['message'].format(
83 email=email, link=link
84 )
85 )
86
87
88 def send_email_new_session(email, event_name, link):
89 """email for new session"""
90 send_email(
91 to=email,
92 action=NEW_SESSION,
93 subject=MAILS[NEW_SESSION]['subject'].format(
94 event_name=event_name
95 ),
96 html=MAILS[NEW_SESSION]['message'].format(
97 email=email,
98 event_name=event_name,
99 link=link
100 )
101 )
102
```
Path: `app/api/sessions.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from marshmallow_jsonapi.flask import Schema, Relationship
3 from marshmallow_jsonapi import fields
4 from marshmallow import validates_schema
5 import marshmallow.validate as validate
6
7 from app.api.bootstrap import api
8 from app.api.events import Event
9 from app.api.helpers.utilities import dasherize
10 from app.models import db
11 from app.settings import get_settings
12 from app.models.session import Session
13 from app.models.track import Track
14 from app.models.speaker import Speaker
15 from app.models.session_type import SessionType
16 from app.models.microlocation import Microlocation
17 from app.api.helpers.exceptions import UnprocessableEntity
18 from app.api.helpers.db import safe_query
19 from app.api.helpers.utilities import require_relationship
20 from app.api.helpers.permission_manager import has_access
21 from app.api.helpers.exceptions import ForbiddenException
22 from app.api.helpers.permissions import current_identity
23 from app.api.helpers.mail import send_email_new_session
24
25
26 class SessionSchema(Schema):
27 """
28 Api schema for Session Model
29 """
30
31 class Meta:
32 """
33 Meta class for Session Api Schema
34 """
35 type_ = 'session'
36 self_view = 'v1.session_detail'
37 self_view_kwargs = {'id': '<id>'}
38 inflect = dasherize
39
40 @validates_schema(pass_original=True)
41 def validate_date(self, data, original_data):
42 if 'id' in original_data['data']:
43 session = Session.query.filter_by(id=original_data['data']['id']).one()
44
45 if 'starts_at' not in data:
46 data['starts_at'] = session.starts_at
47
48 if 'ends_at' not in data:
49 data['ends_at'] = session.ends_at
50
51 if data['starts_at'] >= data['ends_at']:
52 raise UnprocessableEntity({'pointer': '/data/attributes/ends-at'}, "ends-at should be after starts-at")
53
54 if 'state' in data:
55 if data['state'] is not 'draft' or not 'pending':
56 if not has_access('is_coorganizer', event_id=data['event']):
57 return ForbiddenException({'source': ''}, 'Co-organizer access is required.')
58
59 if 'track' in data:
60 if not has_access('is_coorganizer', event_id=data['event']):
61 return ForbiddenException({'source': ''}, 'Co-organizer access is required.')
62
63 if 'microlocation' in data:
64 if not has_access('is_coorganizer', event_id=data['event']):
65 return ForbiddenException({'source': ''}, 'Co-organizer access is required.')
66
67 id = fields.Str(dump_only=True)
68 title = fields.Str(required=True)
69 subtitle = fields.Str(allow_none=True)
70 level = fields.Int(allow_none=True)
71 short_abstract = fields.Str(allow_none=True)
72 long_abstract = fields.Str(allow_none=True)
73 comments = fields.Str(allow_none=True)
74 starts_at = fields.DateTime(required=True)
75 ends_at = fields.DateTime(required=True)
76 language = fields.Str(allow_none=True)
77 slides_url = fields.Url(allow_none=True)
78 video_url = fields.Url(allow_none=True)
79 audio_url = fields.Url(allow_none=True)
80 signup_url = fields.Url(allow_none=True)
81 state = fields.Str(validate=validate.OneOf(choices=["pending", "accepted", "confirmed", "rejected", "draft"]),
82 allow_none=True, default='draft')
83 created_at = fields.DateTime(dump_only=True)
84 deleted_at = fields.DateTime(dump_only=True)
85 submitted_at = fields.DateTime(allow_none=True)
86 is_mail_sent = fields.Boolean()
87 microlocation = Relationship(attribute='microlocation',
88 self_view='v1.session_microlocation',
89 self_view_kwargs={'id': '<id>'},
90 related_view='v1.microlocation_detail',
91 related_view_kwargs={'session_id': '<id>'},
92 schema='MicrolocationSchema',
93 type_='microlocation')
94 track = Relationship(attribute='track',
95 self_view='v1.session_track',
96 self_view_kwargs={'id': '<id>'},
97 related_view='v1.track_detail',
98 related_view_kwargs={'session_id': '<id>'},
99 schema='TrackSchema',
100 type_='track')
101 session_type = Relationship(attribute='session_type',
102 self_view='v1.session_session_type',
103 self_view_kwargs={'id': '<id>'},
104 related_view='v1.session_type_detail',
105 related_view_kwargs={'session_id': '<id>'},
106 schema='SessionTypeSchema',
107 type_='session-type')
108 event = Relationship(attribute='event',
109 self_view='v1.session_event',
110 self_view_kwargs={'id': '<id>'},
111 related_view='v1.event_detail',
112 related_view_kwargs={'session_id': '<id>'},
113 schema='EventSchema',
114 type_='event')
115 speakers = Relationship(
116 attribute='speakers',
117 self_view='v1.session_speaker',
118 self_view_kwargs={'id': '<id>'},
119 related_view='v1.speaker_list',
120 related_view_kwargs={'session_id': '<id>'},
121 schema='SpeakerSchema',
122 type_='speaker')
123
124
125 class SessionListPost(ResourceList):
126 """
127 List Sessions
128 """
129 def before_post(self, args, kwargs, data):
130 require_relationship(['event'], data)
131 data['creator_id'] = current_identity.id
132
133 def after_create_object(self, session, data, view_kwargs):
134 if session.event.get_organizer():
135 event_name = session.event.name
136 organizer_email = session.event.get_organizer().email
137 frontend_url = get_settings()['frontend_url']
138 link = "{}/{}/sessions/{}"\
139 .format(frontend_url, session.event_id, session.id)
140 send_email_new_session(organizer_email, event_name, link)
141
142 decorators = (api.has_permission('create_event'),)
143 schema = SessionSchema
144 data_layer = {'session': db.session,
145 'model': Session,
146 'methods': {'after_create_object': after_create_object
147 }}
148
149
150 class SessionList(ResourceList):
151 """
152 List Sessions
153 """
154
155 def query(self, view_kwargs):
156 query_ = self.session.query(Session)
157 if view_kwargs.get('track_id') is not None:
158 track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')
159 query_ = query_.join(Track).filter(Track.id == track.id)
160 if view_kwargs.get('session_type_id') is not None:
161 session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')
162 query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)
163 if view_kwargs.get('microlocation_id') is not None:
164 microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')
165 query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)
166 if view_kwargs.get('event_id'):
167 event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
168 query_ = query_.join(Event).filter(Event.id == event.id)
169 elif view_kwargs.get('event_identifier'):
170 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
171 query_ = query_.join(Event).filter(Event.identifier == event.id)
172 if view_kwargs.get('speaker_id'):
173 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
174 # session-speaker :: many-to-many relationship
175 query_ = Session.query.filter(Session.speakers.any(id=speaker.id))
176 return query_
177
178 view_kwargs = True
179 methods = ['GET']
180 schema = SessionSchema
181 data_layer = {'session': db.session,
182 'model': Session,
183 'methods': {
184 'query': query
185 }}
186
187
188 class SessionDetail(ResourceDetail):
189 """
190 Session detail by id
191 """
192 def before_get_object(self, view_kwargs):
193 if view_kwargs.get('event_identifier'):
194 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')
195 view_kwargs['event_id'] = event.id
196
197 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
198 schema = SessionSchema
199 data_layer = {'session': db.session,
200 'model': Session,
201 'methods': {'before_get_object': before_get_object}}
202
203
204 class SessionRelationshipRequired(ResourceRelationship):
205 """
206 Session Relationship
207 """
208 schema = SessionSchema
209 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
210 methods = ['GET', 'PATCH']
211 data_layer = {'session': db.session,
212 'model': Session}
213
214
215 class SessionRelationshipOptional(ResourceRelationship):
216 """
217 Session Relationship
218 """
219 schema = SessionSchema
220 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
221 data_layer = {'session': db.session,
222 'model': Session}
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/helpers/mail.py b/app/api/helpers/mail.py
--- a/app/api/helpers/mail.py
+++ b/app/api/helpers/mail.py
@@ -7,7 +7,7 @@
from app.api.helpers.log import record_activity
from app.api.helpers.system_mails import MAILS
from app.api.helpers.utilities import string_empty
-from app.models.mail import Mail, USER_CONFIRM, NEW_SESSION
+from app.models.mail import Mail, USER_CONFIRM, NEW_SESSION, SESSION_ACCEPT_REJECT
def send_email(to, action, subject, html):
@@ -99,3 +99,23 @@
link=link
)
)
+
+
+def send_email_session_accept_reject(email, session, link):
+ """email for new session"""
+ session_name = session.title
+ session_acceptance = session.state
+ send_email(
+ to=email,
+ action=SESSION_ACCEPT_REJECT,
+ subject=MAILS[SESSION_ACCEPT_REJECT]['subject'].format(
+ session_name=session_name,
+ acceptance=session_acceptance
+ ),
+ html=MAILS[SESSION_ACCEPT_REJECT]['message'].format(
+ email=email,
+ session_name=session_name,
+ acceptance=session_acceptance,
+ link=link
+ )
+ )
diff --git a/app/api/sessions.py b/app/api/sessions.py
--- a/app/api/sessions.py
+++ b/app/api/sessions.py
@@ -20,7 +20,7 @@
from app.api.helpers.permission_manager import has_access
from app.api.helpers.exceptions import ForbiddenException
from app.api.helpers.permissions import current_identity
-from app.api.helpers.mail import send_email_new_session
+from app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject
class SessionSchema(Schema):
@@ -48,6 +48,9 @@
if 'ends_at' not in data:
data['ends_at'] = session.ends_at
+ if 'event' not in data:
+ data['event'] = session.event_id
+
if data['starts_at'] >= data['ends_at']:
raise UnprocessableEntity({'pointer': '/data/attributes/ends-at'}, "ends-at should be after starts-at")
@@ -135,7 +138,7 @@
event_name = session.event.name
organizer_email = session.event.get_organizer().email
frontend_url = get_settings()['frontend_url']
- link = "{}/{}/sessions/{}"\
+ link = "{}/events/{}/sessions/{}"\
.format(frontend_url, session.event_id, session.id)
send_email_new_session(organizer_email, event_name, link)
@@ -194,11 +197,32 @@
event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')
view_kwargs['event_id'] = event.id
+ def after_update_object(self, session, data, view_kwargs):
+ """ Send email if session accepted or rejected """
+ if 'state' in data and (session.state == 'accepted' or session.state == 'rejected'):
+ # Email for speaker
+ speakers = session.speakers
+ for speaker in speakers:
+ frontend_url = get_settings()['frontend_url']
+ link = "{}/events/{}/sessions/{}" \
+ .format(frontend_url, session.event_id, session.id)
+ send_email_session_accept_reject(speaker.email, session, link)
+
+ # Email for organizer
+ if session.event.get_organizer():
+ organizer_email = session.event.get_organizer().email
+ frontend_url = get_settings()['frontend_url']
+ link = "{}/events/{}/sessions/{}" \
+ .format(frontend_url, session.event_id, session.id)
+ send_email_session_accept_reject(organizer_email, session,
+ link)
+
decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
schema = SessionSchema
data_layer = {'session': db.session,
'model': Session,
- 'methods': {'before_get_object': before_get_object}}
+ 'methods': {'before_get_object': before_get_object,
+ 'after_update_object': after_update_object}}
class SessionRelationshipRequired(ResourceRelationship):
| {"golden_diff": "diff --git a/app/api/helpers/mail.py b/app/api/helpers/mail.py\n--- a/app/api/helpers/mail.py\n+++ b/app/api/helpers/mail.py\n@@ -7,7 +7,7 @@\n from app.api.helpers.log import record_activity\n from app.api.helpers.system_mails import MAILS\n from app.api.helpers.utilities import string_empty\n-from app.models.mail import Mail, USER_CONFIRM, NEW_SESSION\n+from app.models.mail import Mail, USER_CONFIRM, NEW_SESSION, SESSION_ACCEPT_REJECT\n \n \n def send_email(to, action, subject, html):\n@@ -99,3 +99,23 @@\n link=link\n )\n )\n+\n+\n+def send_email_session_accept_reject(email, session, link):\n+ \"\"\"email for new session\"\"\"\n+ session_name = session.title\n+ session_acceptance = session.state\n+ send_email(\n+ to=email,\n+ action=SESSION_ACCEPT_REJECT,\n+ subject=MAILS[SESSION_ACCEPT_REJECT]['subject'].format(\n+ session_name=session_name,\n+ acceptance=session_acceptance\n+ ),\n+ html=MAILS[SESSION_ACCEPT_REJECT]['message'].format(\n+ email=email,\n+ session_name=session_name,\n+ acceptance=session_acceptance,\n+ link=link\n+ )\n+ )\ndiff --git a/app/api/sessions.py b/app/api/sessions.py\n--- a/app/api/sessions.py\n+++ b/app/api/sessions.py\n@@ -20,7 +20,7 @@\n from app.api.helpers.permission_manager import has_access\n from app.api.helpers.exceptions import ForbiddenException\n from app.api.helpers.permissions import current_identity\n-from app.api.helpers.mail import send_email_new_session\n+from app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\n \n \n class SessionSchema(Schema):\n@@ -48,6 +48,9 @@\n if 'ends_at' not in data:\n data['ends_at'] = session.ends_at\n \n+ if 'event' not in data:\n+ data['event'] = session.event_id\n+\n if data['starts_at'] >= data['ends_at']:\n raise UnprocessableEntity({'pointer': '/data/attributes/ends-at'}, \"ends-at should be after starts-at\")\n \n@@ -135,7 +138,7 @@\n event_name = session.event.name\n organizer_email = session.event.get_organizer().email\n frontend_url = get_settings()['frontend_url']\n- link = \"{}/{}/sessions/{}\"\\\n+ link = \"{}/events/{}/sessions/{}\"\\\n .format(frontend_url, session.event_id, session.id)\n send_email_new_session(organizer_email, event_name, link)\n \n@@ -194,11 +197,32 @@\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')\n view_kwargs['event_id'] = event.id\n \n+ def after_update_object(self, session, data, view_kwargs):\n+ \"\"\" Send email if session accepted or rejected \"\"\"\n+ if 'state' in data and (session.state == 'accepted' or session.state == 'rejected'):\n+ # Email for speaker\n+ speakers = session.speakers\n+ for speaker in speakers:\n+ frontend_url = get_settings()['frontend_url']\n+ link = \"{}/events/{}/sessions/{}\" \\\n+ .format(frontend_url, session.event_id, session.id)\n+ send_email_session_accept_reject(speaker.email, session, link)\n+\n+ # Email for organizer\n+ if session.event.get_organizer():\n+ organizer_email = session.event.get_organizer().email\n+ frontend_url = get_settings()['frontend_url']\n+ link = \"{}/events/{}/sessions/{}\" \\\n+ .format(frontend_url, session.event_id, session.id)\n+ send_email_session_accept_reject(organizer_email, session,\n+ link)\n+\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n- 'methods': {'before_get_object': before_get_object}}\n+ 'methods': {'before_get_object': before_get_object,\n+ 'after_update_object': 
after_update_object}}\n \n \n class SessionRelationshipRequired(ResourceRelationship):\n", "issue": "Email for session submission, approval and rejection\nParent Issue #4196\r\n\r\nEmails for following are to be done in this issue:\r\n\r\n- [x] Email for session submission to Event Organizer\r\n- [x] Email for session acceptance/rejection to Speaker\r\n- [x] Email for session acceptance/rejection to Organizer\n", "before_files": [{"content": "from datetime import datetime\n\nfrom flask import current_app\n\nfrom app import get_settings\nfrom app.api.helpers.db import save_to_db\nfrom app.api.helpers.log import record_activity\nfrom app.api.helpers.system_mails import MAILS\nfrom app.api.helpers.utilities import string_empty\nfrom app.models.mail import Mail, USER_CONFIRM, NEW_SESSION\n\n\ndef send_email(to, action, subject, html):\n \"\"\"\n Sends email and records it in DB\n \"\"\"\n if not string_empty(to):\n email_service = get_settings()['email_service']\n email_from_name = get_settings()['email_from_name']\n if email_service == 'smtp':\n email_from = email_from_name + '<' + get_settings()['email_from'] + '>'\n else:\n email_from = get_settings()['email_from']\n payload = {\n 'to': to,\n 'from': email_from,\n 'subject': subject,\n 'html': html\n }\n\n if not current_app.config['TESTING']:\n if email_service == 'smtp':\n smtp_encryption = get_settings()['smtp_encryption']\n if smtp_encryption == 'tls':\n smtp_encryption = 'required'\n elif smtp_encryption == 'ssl':\n smtp_encryption = 'ssl'\n elif smtp_encryption == 'tls_optional':\n smtp_encryption = 'optional'\n else:\n smtp_encryption = 'none'\n\n config = {\n 'host': get_settings()['smtp_host'],\n 'username': get_settings()['smtp_username'],\n 'password': get_settings()['smtp_password'],\n 'encryption': smtp_encryption,\n 'port': get_settings()['smtp_port'],\n }\n\n from tasks import send_mail_via_smtp_task\n send_mail_via_smtp_task.delay(config, payload)\n else:\n payload['fromname'] = email_from_name\n key = get_settings()['sendgrid_key']\n if not key and not current_app.config['TESTING']:\n print('Sendgrid key not defined')\n return\n headers = {\n \"Authorization\": (\"Bearer \" + key)\n }\n from tasks import send_email_task\n send_email_task.delay(payload, headers)\n\n # record_mail(to, action, subject, html)\n mail = Mail(\n recipient=to, action=action, subject=subject,\n message=html, time=datetime.utcnow()\n )\n\n save_to_db(mail, 'Mail Recorded')\n record_activity('mail_event', email=to, action=action, subject=subject)\n return True\n\n\ndef send_email_confirmation(email, link):\n \"\"\"account confirmation\"\"\"\n send_email(\n to=email,\n action=USER_CONFIRM,\n subject=MAILS[USER_CONFIRM]['subject'],\n html=MAILS[USER_CONFIRM]['message'].format(\n email=email, link=link\n )\n )\n\n\ndef send_email_new_session(email, event_name, link):\n \"\"\"email for new session\"\"\"\n send_email(\n to=email,\n action=NEW_SESSION,\n subject=MAILS[NEW_SESSION]['subject'].format(\n event_name=event_name\n ),\n html=MAILS[NEW_SESSION]['message'].format(\n email=email,\n event_name=event_name,\n link=link\n )\n )\n", "path": "app/api/helpers/mail.py"}, {"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nfrom marshmallow import validates_schema\nimport marshmallow.validate as validate\n\nfrom app.api.bootstrap import api\nfrom app.api.events import Event\nfrom app.api.helpers.utilities import 
dasherize\nfrom app.models import db\nfrom app.settings import get_settings\nfrom app.models.session import Session\nfrom app.models.track import Track\nfrom app.models.speaker import Speaker\nfrom app.models.session_type import SessionType\nfrom app.models.microlocation import Microlocation\nfrom app.api.helpers.exceptions import UnprocessableEntity\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permissions import current_identity\nfrom app.api.helpers.mail import send_email_new_session\n\n\nclass SessionSchema(Schema):\n \"\"\"\n Api schema for Session Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for Session Api Schema\n \"\"\"\n type_ = 'session'\n self_view = 'v1.session_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n session = Session.query.filter_by(id=original_data['data']['id']).one()\n\n if 'starts_at' not in data:\n data['starts_at'] = session.starts_at\n\n if 'ends_at' not in data:\n data['ends_at'] = session.ends_at\n\n if data['starts_at'] >= data['ends_at']:\n raise UnprocessableEntity({'pointer': '/data/attributes/ends-at'}, \"ends-at should be after starts-at\")\n\n if 'state' in data:\n if data['state'] is not 'draft' or not 'pending':\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n if 'track' in data:\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n if 'microlocation' in data:\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n id = fields.Str(dump_only=True)\n title = fields.Str(required=True)\n subtitle = fields.Str(allow_none=True)\n level = fields.Int(allow_none=True)\n short_abstract = fields.Str(allow_none=True)\n long_abstract = fields.Str(allow_none=True)\n comments = fields.Str(allow_none=True)\n starts_at = fields.DateTime(required=True)\n ends_at = fields.DateTime(required=True)\n language = fields.Str(allow_none=True)\n slides_url = fields.Url(allow_none=True)\n video_url = fields.Url(allow_none=True)\n audio_url = fields.Url(allow_none=True)\n signup_url = fields.Url(allow_none=True)\n state = fields.Str(validate=validate.OneOf(choices=[\"pending\", \"accepted\", \"confirmed\", \"rejected\", \"draft\"]),\n allow_none=True, default='draft')\n created_at = fields.DateTime(dump_only=True)\n deleted_at = fields.DateTime(dump_only=True)\n submitted_at = fields.DateTime(allow_none=True)\n is_mail_sent = fields.Boolean()\n microlocation = Relationship(attribute='microlocation',\n self_view='v1.session_microlocation',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.microlocation_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='MicrolocationSchema',\n type_='microlocation')\n track = Relationship(attribute='track',\n self_view='v1.session_track',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.track_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='TrackSchema',\n type_='track')\n session_type = Relationship(attribute='session_type',\n self_view='v1.session_session_type',\n 
self_view_kwargs={'id': '<id>'},\n related_view='v1.session_type_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='SessionTypeSchema',\n type_='session-type')\n event = Relationship(attribute='event',\n self_view='v1.session_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='EventSchema',\n type_='event')\n speakers = Relationship(\n attribute='speakers',\n self_view='v1.session_speaker',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.speaker_list',\n related_view_kwargs={'session_id': '<id>'},\n schema='SpeakerSchema',\n type_='speaker')\n\n\nclass SessionListPost(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n def before_post(self, args, kwargs, data):\n require_relationship(['event'], data)\n data['creator_id'] = current_identity.id\n\n def after_create_object(self, session, data, view_kwargs):\n if session.event.get_organizer():\n event_name = session.event.name\n organizer_email = session.event.get_organizer().email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/{}/sessions/{}\"\\\n .format(frontend_url, session.event_id, session.id)\n send_email_new_session(organizer_email, event_name, link)\n\n decorators = (api.has_permission('create_event'),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'after_create_object': after_create_object\n }}\n\n\nclass SessionList(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def query(self, view_kwargs):\n query_ = self.session.query(Session)\n if view_kwargs.get('track_id') is not None:\n track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')\n query_ = query_.join(Track).filter(Track.id == track.id)\n if view_kwargs.get('session_type_id') is not None:\n session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')\n query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')\n query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n query_ = query_.join(Event).filter(Event.id == event.id)\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n query_ = query_.join(Event).filter(Event.identifier == event.id)\n if view_kwargs.get('speaker_id'):\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {\n 'query': query\n }}\n\n\nclass SessionDetail(ResourceDetail):\n \"\"\"\n Session detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')\n view_kwargs['event_id'] = event.id\n\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': 
{'before_get_object': before_get_object}}\n\n\nclass SessionRelationshipRequired(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session,\n 'model': Session}\n\n\nclass SessionRelationshipOptional(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n data_layer = {'session': db.session,\n 'model': Session}\n", "path": "app/api/sessions.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom flask import current_app\n\nfrom app import get_settings\nfrom app.api.helpers.db import save_to_db\nfrom app.api.helpers.log import record_activity\nfrom app.api.helpers.system_mails import MAILS\nfrom app.api.helpers.utilities import string_empty\nfrom app.models.mail import Mail, USER_CONFIRM, NEW_SESSION, SESSION_ACCEPT_REJECT\n\n\ndef send_email(to, action, subject, html):\n \"\"\"\n Sends email and records it in DB\n \"\"\"\n if not string_empty(to):\n email_service = get_settings()['email_service']\n email_from_name = get_settings()['email_from_name']\n if email_service == 'smtp':\n email_from = email_from_name + '<' + get_settings()['email_from'] + '>'\n else:\n email_from = get_settings()['email_from']\n payload = {\n 'to': to,\n 'from': email_from,\n 'subject': subject,\n 'html': html\n }\n\n if not current_app.config['TESTING']:\n if email_service == 'smtp':\n smtp_encryption = get_settings()['smtp_encryption']\n if smtp_encryption == 'tls':\n smtp_encryption = 'required'\n elif smtp_encryption == 'ssl':\n smtp_encryption = 'ssl'\n elif smtp_encryption == 'tls_optional':\n smtp_encryption = 'optional'\n else:\n smtp_encryption = 'none'\n\n config = {\n 'host': get_settings()['smtp_host'],\n 'username': get_settings()['smtp_username'],\n 'password': get_settings()['smtp_password'],\n 'encryption': smtp_encryption,\n 'port': get_settings()['smtp_port'],\n }\n\n from tasks import send_mail_via_smtp_task\n send_mail_via_smtp_task.delay(config, payload)\n else:\n payload['fromname'] = email_from_name\n key = get_settings()['sendgrid_key']\n if not key and not current_app.config['TESTING']:\n print('Sendgrid key not defined')\n return\n headers = {\n \"Authorization\": (\"Bearer \" + key)\n }\n from tasks import send_email_task\n send_email_task.delay(payload, headers)\n\n # record_mail(to, action, subject, html)\n mail = Mail(\n recipient=to, action=action, subject=subject,\n message=html, time=datetime.utcnow()\n )\n\n save_to_db(mail, 'Mail Recorded')\n record_activity('mail_event', email=to, action=action, subject=subject)\n return True\n\n\ndef send_email_confirmation(email, link):\n \"\"\"account confirmation\"\"\"\n send_email(\n to=email,\n action=USER_CONFIRM,\n subject=MAILS[USER_CONFIRM]['subject'],\n html=MAILS[USER_CONFIRM]['message'].format(\n email=email, link=link\n )\n )\n\n\ndef send_email_new_session(email, event_name, link):\n \"\"\"email for new session\"\"\"\n send_email(\n to=email,\n action=NEW_SESSION,\n subject=MAILS[NEW_SESSION]['subject'].format(\n event_name=event_name\n ),\n html=MAILS[NEW_SESSION]['message'].format(\n email=email,\n event_name=event_name,\n link=link\n )\n )\n\n\ndef send_email_session_accept_reject(email, session, link):\n \"\"\"email for new session\"\"\"\n session_name = session.title\n session_acceptance = 
session.state\n send_email(\n to=email,\n action=SESSION_ACCEPT_REJECT,\n subject=MAILS[SESSION_ACCEPT_REJECT]['subject'].format(\n session_name=session_name,\n acceptance=session_acceptance\n ),\n html=MAILS[SESSION_ACCEPT_REJECT]['message'].format(\n email=email,\n session_name=session_name,\n acceptance=session_acceptance,\n link=link\n )\n )\n", "path": "app/api/helpers/mail.py"}, {"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nfrom marshmallow import validates_schema\nimport marshmallow.validate as validate\n\nfrom app.api.bootstrap import api\nfrom app.api.events import Event\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.settings import get_settings\nfrom app.models.session import Session\nfrom app.models.track import Track\nfrom app.models.speaker import Speaker\nfrom app.models.session_type import SessionType\nfrom app.models.microlocation import Microlocation\nfrom app.api.helpers.exceptions import UnprocessableEntity\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permissions import current_identity\nfrom app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\n\n\nclass SessionSchema(Schema):\n \"\"\"\n Api schema for Session Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for Session Api Schema\n \"\"\"\n type_ = 'session'\n self_view = 'v1.session_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n session = Session.query.filter_by(id=original_data['data']['id']).one()\n\n if 'starts_at' not in data:\n data['starts_at'] = session.starts_at\n\n if 'ends_at' not in data:\n data['ends_at'] = session.ends_at\n\n if 'event' not in data:\n data['event'] = session.event_id\n\n if data['starts_at'] >= data['ends_at']:\n raise UnprocessableEntity({'pointer': '/data/attributes/ends-at'}, \"ends-at should be after starts-at\")\n\n if 'state' in data:\n if data['state'] is not 'draft' or not 'pending':\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n if 'track' in data:\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n if 'microlocation' in data:\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n id = fields.Str(dump_only=True)\n title = fields.Str(required=True)\n subtitle = fields.Str(allow_none=True)\n level = fields.Int(allow_none=True)\n short_abstract = fields.Str(allow_none=True)\n long_abstract = fields.Str(allow_none=True)\n comments = fields.Str(allow_none=True)\n starts_at = fields.DateTime(required=True)\n ends_at = fields.DateTime(required=True)\n language = fields.Str(allow_none=True)\n slides_url = fields.Url(allow_none=True)\n video_url = fields.Url(allow_none=True)\n audio_url = fields.Url(allow_none=True)\n signup_url = fields.Url(allow_none=True)\n state = fields.Str(validate=validate.OneOf(choices=[\"pending\", \"accepted\", 
\"confirmed\", \"rejected\", \"draft\"]),\n allow_none=True, default='draft')\n created_at = fields.DateTime(dump_only=True)\n deleted_at = fields.DateTime(dump_only=True)\n submitted_at = fields.DateTime(allow_none=True)\n is_mail_sent = fields.Boolean()\n microlocation = Relationship(attribute='microlocation',\n self_view='v1.session_microlocation',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.microlocation_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='MicrolocationSchema',\n type_='microlocation')\n track = Relationship(attribute='track',\n self_view='v1.session_track',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.track_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='TrackSchema',\n type_='track')\n session_type = Relationship(attribute='session_type',\n self_view='v1.session_session_type',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_type_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='SessionTypeSchema',\n type_='session-type')\n event = Relationship(attribute='event',\n self_view='v1.session_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='EventSchema',\n type_='event')\n speakers = Relationship(\n attribute='speakers',\n self_view='v1.session_speaker',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.speaker_list',\n related_view_kwargs={'session_id': '<id>'},\n schema='SpeakerSchema',\n type_='speaker')\n\n\nclass SessionListPost(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n def before_post(self, args, kwargs, data):\n require_relationship(['event'], data)\n data['creator_id'] = current_identity.id\n\n def after_create_object(self, session, data, view_kwargs):\n if session.event.get_organizer():\n event_name = session.event.name\n organizer_email = session.event.get_organizer().email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\"\\\n .format(frontend_url, session.event_id, session.id)\n send_email_new_session(organizer_email, event_name, link)\n\n decorators = (api.has_permission('create_event'),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'after_create_object': after_create_object\n }}\n\n\nclass SessionList(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def query(self, view_kwargs):\n query_ = self.session.query(Session)\n if view_kwargs.get('track_id') is not None:\n track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')\n query_ = query_.join(Track).filter(Track.id == track.id)\n if view_kwargs.get('session_type_id') is not None:\n session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')\n query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')\n query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n query_ = query_.join(Event).filter(Event.id == event.id)\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n query_ = query_.join(Event).filter(Event.identifier == event.id)\n if view_kwargs.get('speaker_id'):\n speaker = 
safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {\n 'query': query\n }}\n\n\nclass SessionDetail(ResourceDetail):\n \"\"\"\n Session detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')\n view_kwargs['event_id'] = event.id\n\n def after_update_object(self, session, data, view_kwargs):\n \"\"\" Send email if session accepted or rejected \"\"\"\n if 'state' in data and (session.state == 'accepted' or session.state == 'rejected'):\n # Email for speaker\n speakers = session.speakers\n for speaker in speakers:\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, session.event_id, session.id)\n send_email_session_accept_reject(speaker.email, session, link)\n\n # Email for organizer\n if session.event.get_organizer():\n organizer_email = session.event.get_organizer().email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, session.event_id, session.id)\n send_email_session_accept_reject(organizer_email, session,\n link)\n\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'before_get_object': before_get_object,\n 'after_update_object': after_update_object}}\n\n\nclass SessionRelationshipRequired(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session,\n 'model': Session}\n\n\nclass SessionRelationshipOptional(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n data_layer = {'session': db.session,\n 'model': Session}\n", "path": "app/api/sessions.py"}]} | 3,728 | 934 |
gh_patches_debug_47929 | rasdani/github-patches | git_diff | liqd__a4-opin-1835 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sort by section changes automatically to "most recent" on productive
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/ideas/templatetags/idea_tags.py`
Content:
```
1 from django import template
2
3 from euth.ideas.models import Idea
4
5 register = template.Library()
6
7
8 @register.simple_tag
9 def get_range(number, listcount):
10 if number < 3:
11 return range(1, 6)
12 elif number > listcount - 2:
13 return range(listcount - 4, listcount + 1)
14 else:
15 return range(number - 2, number + 3)
16
17
18 @register.simple_tag
19 def is_idea_list(module):
20 return Idea.objects.filter(module=module).count() > 0
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/ideas/templatetags/idea_tags.py b/euth/ideas/templatetags/idea_tags.py
--- a/euth/ideas/templatetags/idea_tags.py
+++ b/euth/ideas/templatetags/idea_tags.py
@@ -18,3 +18,12 @@
@register.simple_tag
def is_idea_list(module):
return Idea.objects.filter(module=module).count() > 0
+
+
[email protected]_tag
+def combined_url_parameter(request_query_dict, **kwargs):
+ combined_query_dict = request_query_dict.copy()
+ for key in kwargs:
+ combined_query_dict.setlist(key, [kwargs[key]])
+ encoded_parameter = '?' + combined_query_dict.urlencode()
+ return encoded_parameter
| {"golden_diff": "diff --git a/euth/ideas/templatetags/idea_tags.py b/euth/ideas/templatetags/idea_tags.py\n--- a/euth/ideas/templatetags/idea_tags.py\n+++ b/euth/ideas/templatetags/idea_tags.py\n@@ -18,3 +18,12 @@\n @register.simple_tag\n def is_idea_list(module):\n return Idea.objects.filter(module=module).count() > 0\n+\n+\[email protected]_tag\n+def combined_url_parameter(request_query_dict, **kwargs):\n+ combined_query_dict = request_query_dict.copy()\n+ for key in kwargs:\n+ combined_query_dict.setlist(key, [kwargs[key]])\n+ encoded_parameter = '?' + combined_query_dict.urlencode()\n+ return encoded_parameter\n", "issue": "Sort by section changes automatically to \"most recent\" on productive\n\n", "before_files": [{"content": "from django import template\n\nfrom euth.ideas.models import Idea\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_range(number, listcount):\n if number < 3:\n return range(1, 6)\n elif number > listcount - 2:\n return range(listcount - 4, listcount + 1)\n else:\n return range(number - 2, number + 3)\n\n\[email protected]_tag\ndef is_idea_list(module):\n return Idea.objects.filter(module=module).count() > 0\n", "path": "euth/ideas/templatetags/idea_tags.py"}], "after_files": [{"content": "from django import template\n\nfrom euth.ideas.models import Idea\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_range(number, listcount):\n if number < 3:\n return range(1, 6)\n elif number > listcount - 2:\n return range(listcount - 4, listcount + 1)\n else:\n return range(number - 2, number + 3)\n\n\[email protected]_tag\ndef is_idea_list(module):\n return Idea.objects.filter(module=module).count() > 0\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n", "path": "euth/ideas/templatetags/idea_tags.py"}]} | 434 | 174 |
gh_patches_debug_37706 | rasdani/github-patches | git_diff | mlflow__mlflow-1330 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for 'compile=False' when loading Keras models
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: Yes
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **MLflow installed from (source or binary)**: binary
- **MLflow version (run ``mlflow --version``)**: 0.9
- **Python version**: 3.6
- **npm version (if running the dev UI)**:
- **Exact command to reproduce**:
### Describe the problem
When loading Keras models that were trained with custom losses and metrics, Keras will throw an error if those functions are not found:
https://github.com/keras-team/keras/issues/5916
These functions are not needed for prediction, and this can be solved by adding the 'compile=False' keyword parameter when calling load_model:
https://github.com/keras-team/keras/issues/5916#issuecomment-457624404
However, mlflow does not support passing any of these when loading and saving Keras models.
Support for this should be added to support more complex models and training use cases.
In the meanwhile, does anyone have any suggestions for workarounds?
Support for 'compile=False' when loading Keras models
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: Yes
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **MLflow installed from (source or binary)**: binary
- **MLflow version (run ``mlflow --version``)**: 0.9
- **Python version**: 3.6
- **npm version (if running the dev UI)**:
- **Exact command to reproduce**:
### Describe the problem
When loading Keras models that were trained with custom losses and metrics, Keras will throw an error if those functions are not found:
https://github.com/keras-team/keras/issues/5916
These functions are not needed for prediction, and this can be solved by adding the 'compile=False' keyword parameter when calling load_model:
https://github.com/keras-team/keras/issues/5916#issuecomment-457624404
However, mlflow does not support passing any of these when loading and saving Keras models.
Support for this should be added to support more complex models and training use cases.
In the meanwhile, does anyone have any suggestions for workarounds?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/keras.py`
Content:
```
1 """
2 The ``mlflow.keras`` module provides an API for logging and loading Keras models. This module
3 exports Keras models with the following flavors:
4
5 Keras (native) format
6 This is the main flavor that can be loaded back into Keras.
7 :py:mod:`mlflow.pyfunc`
8 Produced for use by generic pyfunc-based deployment tools and batch inference.
9 """
10
11 from __future__ import absolute_import
12
13 import os
14 import yaml
15
16 import pandas as pd
17
18 from mlflow import pyfunc
19 from mlflow.models import Model
20 import mlflow.tracking
21 from mlflow.tracking.artifact_utils import _download_artifact_from_uri
22 from mlflow.utils.environment import _mlflow_conda_env
23 from mlflow.utils.model_utils import _get_flavor_configuration
24
25 FLAVOR_NAME = "keras"
26
27
28 def get_default_conda_env():
29 """
30 :return: The default Conda environment for MLflow Models produced by calls to
31 :func:`save_model()` and :func:`log_model()`.
32 """
33 import keras
34 import tensorflow as tf
35
36 return _mlflow_conda_env(
37 additional_conda_deps=[
38 "keras={}".format(keras.__version__),
39 # The Keras pyfunc representation requires the TensorFlow
40 # backend for Keras. Therefore, the conda environment must
41 # include TensorFlow
42 "tensorflow=={}".format(tf.__version__),
43 ],
44 additional_pip_deps=None,
45 additional_conda_channels=None)
46
47
48 def save_model(keras_model, path, conda_env=None, mlflow_model=Model()):
49 """
50 Save a Keras model to a path on the local file system.
51
52 :param keras_model: Keras model to be saved.
53 :param path: Local path where the model is to be saved.
54 :param conda_env: Either a dictionary representation of a Conda environment or the path to a
55 Conda environment yaml file. If provided, this decribes the environment
56 this model should be run in. At minimum, it should specify the dependencies
57 contained in :func:`get_default_conda_env()`. If `None`, the default
58 :func:`get_default_conda_env()` environment is added to the model.
59 The following is an *example* dictionary representation of a Conda
60 environment::
61
62 {
63 'name': 'mlflow-env',
64 'channels': ['defaults'],
65 'dependencies': [
66 'python=3.7.0',
67 'keras=2.2.4',
68 'tensorflow=1.8.0'
69 ]
70 }
71
72 :param mlflow_model: MLflow model config this flavor is being added to.
73
74 >>> import mlflow
75 >>> # Build, compile, and train your model
76 >>> keras_model = ...
77 >>> keras_model_path = ...
78 >>> keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
79 >>> results = keras_model.fit(
80 ... x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
81 ... # Save the model as an MLflow Model
82 >>> mlflow.keras.save_model(keras_model, keras_model_path)
83 """
84 import keras
85
86 path = os.path.abspath(path)
87 if os.path.exists(path):
88 raise Exception("Path '{}' already exists".format(path))
89 os.makedirs(path)
90 model_data_subpath = "model.h5"
91 keras_model.save(os.path.join(path, model_data_subpath))
92
93 conda_env_subpath = "conda.yaml"
94 if conda_env is None:
95 conda_env = get_default_conda_env()
96 elif not isinstance(conda_env, dict):
97 with open(conda_env, "r") as f:
98 conda_env = yaml.safe_load(f)
99 with open(os.path.join(path, conda_env_subpath), "w") as f:
100 yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
101
102 pyfunc.add_to_model(mlflow_model, loader_module="mlflow.keras",
103 data=model_data_subpath, env=conda_env_subpath)
104 mlflow_model.add_flavor(FLAVOR_NAME, keras_version=keras.__version__, data=model_data_subpath)
105 mlflow_model.save(os.path.join(path, "MLmodel"))
106
107
108 def log_model(keras_model, artifact_path, conda_env=None, **kwargs):
109 """
110 Log a Keras model as an MLflow artifact for the current run.
111
112 :param keras_model: Keras model to be saved.
113 :param artifact_path: Run-relative artifact path.
114 :param conda_env: Either a dictionary representation of a Conda environment or the path to a
115 Conda environment yaml file. If provided, this decribes the environment
116 this model should be run in. At minimum, it should specify the dependencies
117 contained in :func:`get_default_conda_env()`. If `None`, the default
118 :func:`mlflow.keras.get_default_conda_env()` environment is added to the
119 model. The following is an *example* dictionary representation of a Conda
120 environment::
121
122 {
123 'name': 'mlflow-env',
124 'channels': ['defaults'],
125 'dependencies': [
126 'python=3.7.0',
127 'keras=2.2.4',
128 'tensorflow=1.8.0'
129 ]
130 }
131
132 :param kwargs: kwargs to pass to ``keras_model.save`` method.
133
134 >>> from keras import Dense, layers
135 >>> import mlflow
136 >>> # Build, compile, and train your model
137 >>> keras_model = ...
138 >>> keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
139 >>> results = keras_model.fit(
140 ... x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
141 >>> # Log metrics and log the model
142 >>> with mlflow.start_run() as run:
143 >>> mlflow.keras.log_model(keras_model, "models")
144 """
145 Model.log(artifact_path=artifact_path, flavor=mlflow.keras,
146 keras_model=keras_model, conda_env=conda_env, **kwargs)
147
148
149 def _load_model(model_file):
150 import keras
151 import keras.models
152 import h5py
153
154 from distutils.version import StrictVersion
155
156 if StrictVersion(keras.__version__) >= StrictVersion("2.2.3"):
157 # NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead
158 # of string to avoid issues.
159 with h5py.File(os.path.abspath(model_file), "r") as model_file:
160 return keras.models.load_model(model_file)
161 else:
162 # NOTE: Older versions of Keras only handle filepath.
163 return keras.models.load_model(model_file)
164
165
166 class _KerasModelWrapper:
167 def __init__(self, keras_model, graph, sess):
168 self.keras_model = keras_model
169 self._graph = graph
170 self._sess = sess
171
172 def predict(self, dataframe):
173 with self._graph.as_default():
174 with self._sess.as_default():
175 predicted = pd.DataFrame(self.keras_model.predict(dataframe))
176 predicted.index = dataframe.index
177 return predicted
178
179
180 def _load_pyfunc(path):
181 """
182 Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
183
184 :param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.
185 """
186 import keras.backend as K
187 import tensorflow as tf
188
189 if K._BACKEND == 'tensorflow':
190 graph = tf.Graph()
191 sess = tf.Session(graph=graph)
192 # By default tf backed models depend on the global graph and session.
193 # We create an use new Graph and Session and store them with the model
194 # This way the model is independent on the global state.
195 with graph.as_default():
196 with sess.as_default(): # pylint:disable=not-context-manager
197 K.set_learning_phase(0)
198 m = _load_model(path)
199 return _KerasModelWrapper(m, graph, sess)
200 else:
201 raise Exception("Unsupported backend '%s'" % K._BACKEND)
202
203
204 def load_model(model_uri):
205 """
206 Load a Keras model from a local file (if ``run_id`` is None) or a run.
207
208 :param model_uri: The location, in URI format, of the MLflow model, for example:
209
210 - ``/Users/me/path/to/local/model``
211 - ``relative/path/to/local/model``
212 - ``s3://my_bucket/path/to/model``
213 - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
214
215 For more information about supported URI schemes, see the
216 `Artifacts Documentation <https://www.mlflow.org/docs/latest/tracking.html#
217 supported-artifact-stores>`_.
218
219 :return: A Keras model instance.
220
221 >>> # Load persisted model as a Keras model or as a PyFunc, call predict() on a Pandas DataFrame
222 >>> keras_model = mlflow.keras.load_model("models", run_id="96771d893a5e46159d9f3b49bf9013e2")
223 >>> predictions = keras_model.predict(x_test)
224 """
225 local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
226 flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
227 # Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a
228 # `data` key; in this case, we assume the model artifact path to be `model.h5`
229 keras_model_artifacts_path = os.path.join(local_model_path, flavor_conf.get("data", "model.h5"))
230 return _load_model(model_file=keras_model_artifacts_path)
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/keras.py b/mlflow/keras.py
--- a/mlflow/keras.py
+++ b/mlflow/keras.py
@@ -146,21 +146,21 @@
keras_model=keras_model, conda_env=conda_env, **kwargs)
-def _load_model(model_file):
+def _load_model(model_file, **kwargs):
import keras
import keras.models
import h5py
from distutils.version import StrictVersion
- if StrictVersion(keras.__version__) >= StrictVersion("2.2.3"):
+ if StrictVersion(keras.__version__.split('-')[0]) >= StrictVersion("2.2.3"):
# NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead
# of string to avoid issues.
with h5py.File(os.path.abspath(model_file), "r") as model_file:
- return keras.models.load_model(model_file)
+ return keras.models.load_model(model_file, **kwargs)
else:
# NOTE: Older versions of Keras only handle filepath.
- return keras.models.load_model(model_file)
+ return keras.models.load_model(model_file, **kwargs)
class _KerasModelWrapper:
@@ -195,15 +195,16 @@
with graph.as_default():
with sess.as_default(): # pylint:disable=not-context-manager
K.set_learning_phase(0)
- m = _load_model(path)
+ m = _load_model(path, compile=False)
return _KerasModelWrapper(m, graph, sess)
else:
raise Exception("Unsupported backend '%s'" % K._BACKEND)
-def load_model(model_uri):
+def load_model(model_uri, **kwargs):
"""
Load a Keras model from a local file (if ``run_id`` is None) or a run.
+ Extra arguments are passed through to keras.load_model.
:param model_uri: The location, in URI format, of the MLflow model, for example:
@@ -227,4 +228,4 @@
# Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a
# `data` key; in this case, we assume the model artifact path to be `model.h5`
keras_model_artifacts_path = os.path.join(local_model_path, flavor_conf.get("data", "model.h5"))
- return _load_model(model_file=keras_model_artifacts_path)
+ return _load_model(model_file=keras_model_artifacts_path, **kwargs)
| {"golden_diff": "diff --git a/mlflow/keras.py b/mlflow/keras.py\n--- a/mlflow/keras.py\n+++ b/mlflow/keras.py\n@@ -146,21 +146,21 @@\n keras_model=keras_model, conda_env=conda_env, **kwargs)\n \n \n-def _load_model(model_file):\n+def _load_model(model_file, **kwargs):\n import keras\n import keras.models\n import h5py\n \n from distutils.version import StrictVersion\n \n- if StrictVersion(keras.__version__) >= StrictVersion(\"2.2.3\"):\n+ if StrictVersion(keras.__version__.split('-')[0]) >= StrictVersion(\"2.2.3\"):\n # NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead\n # of string to avoid issues.\n with h5py.File(os.path.abspath(model_file), \"r\") as model_file:\n- return keras.models.load_model(model_file)\n+ return keras.models.load_model(model_file, **kwargs)\n else:\n # NOTE: Older versions of Keras only handle filepath.\n- return keras.models.load_model(model_file)\n+ return keras.models.load_model(model_file, **kwargs)\n \n \n class _KerasModelWrapper:\n@@ -195,15 +195,16 @@\n with graph.as_default():\n with sess.as_default(): # pylint:disable=not-context-manager\n K.set_learning_phase(0)\n- m = _load_model(path)\n+ m = _load_model(path, compile=False)\n return _KerasModelWrapper(m, graph, sess)\n else:\n raise Exception(\"Unsupported backend '%s'\" % K._BACKEND)\n \n \n-def load_model(model_uri):\n+def load_model(model_uri, **kwargs):\n \"\"\"\n Load a Keras model from a local file (if ``run_id`` is None) or a run.\n+ Extra arguments are passed through to keras.load_model.\n \n :param model_uri: The location, in URI format, of the MLflow model, for example:\n \n@@ -227,4 +228,4 @@\n # Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a\n # `data` key; in this case, we assume the model artifact path to be `model.h5`\n keras_model_artifacts_path = os.path.join(local_model_path, flavor_conf.get(\"data\", \"model.h5\"))\n- return _load_model(model_file=keras_model_artifacts_path)\n+ return _load_model(model_file=keras_model_artifacts_path, **kwargs)\n", "issue": "Support for 'compile=False' when loading Keras models\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: Yes\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **MLflow installed from (source or binary)**: binary\r\n- **MLflow version (run ``mlflow --version``)**: 0.9\r\n- **Python version**: 3.6\r\n- **npm version (if running the dev UI):\r\n- **Exact command to reproduce**:\r\n\r\n### Describe the problem\r\n\r\nWhen loading Keras models that were trained with custom losses and metrics, Keras will throw an error if those functions are not found:\r\nhttps://github.com/keras-team/keras/issues/5916\r\n\r\nThese functions are not needed for prediction, and this can be solved by adding the 'compile=False' keyword parameter when calling load_model:\r\nhttps://github.com/keras-team/keras/issues/5916#issuecomment-457624404\r\n\r\nHowever, mlflow does not support passing any of these when loading and saving Keras models. 
\r\n\r\nSupport for this should be added to support more complex models and training use cases.\r\n\r\nIn the meanwhile, does anyone have any suggestions for workarounds?\r\n\r\n\nSupport for 'compile=False' when loading Keras models\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: Yes\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **MLflow installed from (source or binary)**: binary\r\n- **MLflow version (run ``mlflow --version``)**: 0.9\r\n- **Python version**: 3.6\r\n- **npm version (if running the dev UI):\r\n- **Exact command to reproduce**:\r\n\r\n### Describe the problem\r\n\r\nWhen loading Keras models that were trained with custom losses and metrics, Keras will throw an error if those functions are not found:\r\nhttps://github.com/keras-team/keras/issues/5916\r\n\r\nThese functions are not needed for prediction, and this can be solved by adding the 'compile=False' keyword parameter when calling load_model:\r\nhttps://github.com/keras-team/keras/issues/5916#issuecomment-457624404\r\n\r\nHowever, mlflow does not support passing any of these when loading and saving Keras models. \r\n\r\nSupport for this should be added to support more complex models and training use cases.\r\n\r\nIn the meanwhile, does anyone have any suggestions for workarounds?\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThe ``mlflow.keras`` module provides an API for logging and loading Keras models. This module\nexports Keras models with the following flavors:\n\nKeras (native) format\n This is the main flavor that can be loaded back into Keras.\n:py:mod:`mlflow.pyfunc`\n Produced for use by generic pyfunc-based deployment tools and batch inference.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\nimport yaml\n\nimport pandas as pd\n\nfrom mlflow import pyfunc\nfrom mlflow.models import Model\nimport mlflow.tracking\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.model_utils import _get_flavor_configuration\n\nFLAVOR_NAME = \"keras\"\n\n\ndef get_default_conda_env():\n \"\"\"\n :return: The default Conda environment for MLflow Models produced by calls to\n :func:`save_model()` and :func:`log_model()`.\n \"\"\"\n import keras\n import tensorflow as tf\n\n return _mlflow_conda_env(\n additional_conda_deps=[\n \"keras={}\".format(keras.__version__),\n # The Keras pyfunc representation requires the TensorFlow\n # backend for Keras. Therefore, the conda environment must\n # include TensorFlow\n \"tensorflow=={}\".format(tf.__version__),\n ],\n additional_pip_deps=None,\n additional_conda_channels=None)\n\n\ndef save_model(keras_model, path, conda_env=None, mlflow_model=Model()):\n \"\"\"\n Save a Keras model to a path on the local file system.\n\n :param keras_model: Keras model to be saved.\n :param path: Local path where the model is to be saved.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. 
If `None`, the default\n :func:`get_default_conda_env()` environment is added to the model.\n The following is an *example* dictionary representation of a Conda\n environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'keras=2.2.4',\n 'tensorflow=1.8.0'\n ]\n }\n\n :param mlflow_model: MLflow model config this flavor is being added to.\n\n >>> import mlflow\n >>> # Build, compile, and train your model\n >>> keras_model = ...\n >>> keras_model_path = ...\n >>> keras_model.compile(optimizer=\"rmsprop\", loss=\"mse\", metrics=[\"accuracy\"])\n >>> results = keras_model.fit(\n ... x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))\n ... # Save the model as an MLflow Model\n >>> mlflow.keras.save_model(keras_model, keras_model_path)\n \"\"\"\n import keras\n\n path = os.path.abspath(path)\n if os.path.exists(path):\n raise Exception(\"Path '{}' already exists\".format(path))\n os.makedirs(path)\n model_data_subpath = \"model.h5\"\n keras_model.save(os.path.join(path, model_data_subpath))\n\n conda_env_subpath = \"conda.yaml\"\n if conda_env is None:\n conda_env = get_default_conda_env()\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n pyfunc.add_to_model(mlflow_model, loader_module=\"mlflow.keras\",\n data=model_data_subpath, env=conda_env_subpath)\n mlflow_model.add_flavor(FLAVOR_NAME, keras_version=keras.__version__, data=model_data_subpath)\n mlflow_model.save(os.path.join(path, \"MLmodel\"))\n\n\ndef log_model(keras_model, artifact_path, conda_env=None, **kwargs):\n \"\"\"\n Log a Keras model as an MLflow artifact for the current run.\n\n :param keras_model: Keras model to be saved.\n :param artifact_path: Run-relative artifact path.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If `None`, the default\n :func:`mlflow.keras.get_default_conda_env()` environment is added to the\n model. The following is an *example* dictionary representation of a Conda\n environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'keras=2.2.4',\n 'tensorflow=1.8.0'\n ]\n }\n\n :param kwargs: kwargs to pass to ``keras_model.save`` method.\n\n >>> from keras import Dense, layers\n >>> import mlflow\n >>> # Build, compile, and train your model\n >>> keras_model = ...\n >>> keras_model.compile(optimizer=\"rmsprop\", loss=\"mse\", metrics=[\"accuracy\"])\n >>> results = keras_model.fit(\n ... x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))\n >>> # Log metrics and log the model\n >>> with mlflow.start_run() as run:\n >>> mlflow.keras.log_model(keras_model, \"models\")\n \"\"\"\n Model.log(artifact_path=artifact_path, flavor=mlflow.keras,\n keras_model=keras_model, conda_env=conda_env, **kwargs)\n\n\ndef _load_model(model_file):\n import keras\n import keras.models\n import h5py\n\n from distutils.version import StrictVersion\n\n if StrictVersion(keras.__version__) >= StrictVersion(\"2.2.3\"):\n # NOTE: Keras 2.2.3 does not work with unicode paths in python2. 
Pass in h5py.File instead\n # of string to avoid issues.\n with h5py.File(os.path.abspath(model_file), \"r\") as model_file:\n return keras.models.load_model(model_file)\n else:\n # NOTE: Older versions of Keras only handle filepath.\n return keras.models.load_model(model_file)\n\n\nclass _KerasModelWrapper:\n def __init__(self, keras_model, graph, sess):\n self.keras_model = keras_model\n self._graph = graph\n self._sess = sess\n\n def predict(self, dataframe):\n with self._graph.as_default():\n with self._sess.as_default():\n predicted = pd.DataFrame(self.keras_model.predict(dataframe))\n predicted.index = dataframe.index\n return predicted\n\n\ndef _load_pyfunc(path):\n \"\"\"\n Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.\n\n :param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.\n \"\"\"\n import keras.backend as K\n import tensorflow as tf\n\n if K._BACKEND == 'tensorflow':\n graph = tf.Graph()\n sess = tf.Session(graph=graph)\n # By default tf backed models depend on the global graph and session.\n # We create an use new Graph and Session and store them with the model\n # This way the model is independent on the global state.\n with graph.as_default():\n with sess.as_default(): # pylint:disable=not-context-manager\n K.set_learning_phase(0)\n m = _load_model(path)\n return _KerasModelWrapper(m, graph, sess)\n else:\n raise Exception(\"Unsupported backend '%s'\" % K._BACKEND)\n\n\ndef load_model(model_uri):\n \"\"\"\n Load a Keras model from a local file (if ``run_id`` is None) or a run.\n\n :param model_uri: The location, in URI format, of the MLflow model, for example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n\n For more information about supported URI schemes, see the\n `Artifacts Documentation <https://www.mlflow.org/docs/latest/tracking.html#\n supported-artifact-stores>`_.\n\n :return: A Keras model instance.\n\n >>> # Load persisted model as a Keras model or as a PyFunc, call predict() on a Pandas DataFrame\n >>> keras_model = mlflow.keras.load_model(\"models\", run_id=\"96771d893a5e46159d9f3b49bf9013e2\")\n >>> predictions = keras_model.predict(x_test)\n \"\"\"\n local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)\n flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)\n # Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a\n # `data` key; in this case, we assume the model artifact path to be `model.h5`\n keras_model_artifacts_path = os.path.join(local_model_path, flavor_conf.get(\"data\", \"model.h5\"))\n return _load_model(model_file=keras_model_artifacts_path)\n", "path": "mlflow/keras.py"}], "after_files": [{"content": "\"\"\"\nThe ``mlflow.keras`` module provides an API for logging and loading Keras models. 
This module\nexports Keras models with the following flavors:\n\nKeras (native) format\n This is the main flavor that can be loaded back into Keras.\n:py:mod:`mlflow.pyfunc`\n Produced for use by generic pyfunc-based deployment tools and batch inference.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\nimport yaml\n\nimport pandas as pd\n\nfrom mlflow import pyfunc\nfrom mlflow.models import Model\nimport mlflow.tracking\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.model_utils import _get_flavor_configuration\n\nFLAVOR_NAME = \"keras\"\n\n\ndef get_default_conda_env():\n \"\"\"\n :return: The default Conda environment for MLflow Models produced by calls to\n :func:`save_model()` and :func:`log_model()`.\n \"\"\"\n import keras\n import tensorflow as tf\n\n return _mlflow_conda_env(\n additional_conda_deps=[\n \"keras={}\".format(keras.__version__),\n # The Keras pyfunc representation requires the TensorFlow\n # backend for Keras. Therefore, the conda environment must\n # include TensorFlow\n \"tensorflow=={}\".format(tf.__version__),\n ],\n additional_pip_deps=None,\n additional_conda_channels=None)\n\n\ndef save_model(keras_model, path, conda_env=None, mlflow_model=Model()):\n \"\"\"\n Save a Keras model to a path on the local file system.\n\n :param keras_model: Keras model to be saved.\n :param path: Local path where the model is to be saved.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If `None`, the default\n :func:`get_default_conda_env()` environment is added to the model.\n The following is an *example* dictionary representation of a Conda\n environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'keras=2.2.4',\n 'tensorflow=1.8.0'\n ]\n }\n\n :param mlflow_model: MLflow model config this flavor is being added to.\n\n >>> import mlflow\n >>> # Build, compile, and train your model\n >>> keras_model = ...\n >>> keras_model_path = ...\n >>> keras_model.compile(optimizer=\"rmsprop\", loss=\"mse\", metrics=[\"accuracy\"])\n >>> results = keras_model.fit(\n ... x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))\n ... 
# Save the model as an MLflow Model\n >>> mlflow.keras.save_model(keras_model, keras_model_path)\n \"\"\"\n import keras\n\n path = os.path.abspath(path)\n if os.path.exists(path):\n raise Exception(\"Path '{}' already exists\".format(path))\n os.makedirs(path)\n model_data_subpath = \"model.h5\"\n keras_model.save(os.path.join(path, model_data_subpath))\n\n conda_env_subpath = \"conda.yaml\"\n if conda_env is None:\n conda_env = get_default_conda_env()\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n pyfunc.add_to_model(mlflow_model, loader_module=\"mlflow.keras\",\n data=model_data_subpath, env=conda_env_subpath)\n mlflow_model.add_flavor(FLAVOR_NAME, keras_version=keras.__version__, data=model_data_subpath)\n mlflow_model.save(os.path.join(path, \"MLmodel\"))\n\n\ndef log_model(keras_model, artifact_path, conda_env=None, **kwargs):\n \"\"\"\n Log a Keras model as an MLflow artifact for the current run.\n\n :param keras_model: Keras model to be saved.\n :param artifact_path: Run-relative artifact path.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If `None`, the default\n :func:`mlflow.keras.get_default_conda_env()` environment is added to the\n model. The following is an *example* dictionary representation of a Conda\n environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'keras=2.2.4',\n 'tensorflow=1.8.0'\n ]\n }\n\n :param kwargs: kwargs to pass to ``keras_model.save`` method.\n\n >>> from keras import Dense, layers\n >>> import mlflow\n >>> # Build, compile, and train your model\n >>> keras_model = ...\n >>> keras_model.compile(optimizer=\"rmsprop\", loss=\"mse\", metrics=[\"accuracy\"])\n >>> results = keras_model.fit(\n ... x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))\n >>> # Log metrics and log the model\n >>> with mlflow.start_run() as run:\n >>> mlflow.keras.log_model(keras_model, \"models\")\n \"\"\"\n Model.log(artifact_path=artifact_path, flavor=mlflow.keras,\n keras_model=keras_model, conda_env=conda_env, **kwargs)\n\n\ndef _load_model(model_file, **kwargs):\n import keras\n import keras.models\n import h5py\n\n from distutils.version import StrictVersion\n\n if StrictVersion(keras.__version__.split('-')[0]) >= StrictVersion(\"2.2.3\"):\n # NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead\n # of string to avoid issues.\n with h5py.File(os.path.abspath(model_file), \"r\") as model_file:\n return keras.models.load_model(model_file, **kwargs)\n else:\n # NOTE: Older versions of Keras only handle filepath.\n return keras.models.load_model(model_file, **kwargs)\n\n\nclass _KerasModelWrapper:\n def __init__(self, keras_model, graph, sess):\n self.keras_model = keras_model\n self._graph = graph\n self._sess = sess\n\n def predict(self, dataframe):\n with self._graph.as_default():\n with self._sess.as_default():\n predicted = pd.DataFrame(self.keras_model.predict(dataframe))\n predicted.index = dataframe.index\n return predicted\n\n\ndef _load_pyfunc(path):\n \"\"\"\n Load PyFunc implementation. 
Called by ``pyfunc.load_pyfunc``.\n\n :param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.\n \"\"\"\n import keras.backend as K\n import tensorflow as tf\n\n if K._BACKEND == 'tensorflow':\n graph = tf.Graph()\n sess = tf.Session(graph=graph)\n # By default tf backed models depend on the global graph and session.\n # We create an use new Graph and Session and store them with the model\n # This way the model is independent on the global state.\n with graph.as_default():\n with sess.as_default(): # pylint:disable=not-context-manager\n K.set_learning_phase(0)\n m = _load_model(path, compile=False)\n return _KerasModelWrapper(m, graph, sess)\n else:\n raise Exception(\"Unsupported backend '%s'\" % K._BACKEND)\n\n\ndef load_model(model_uri, **kwargs):\n \"\"\"\n Load a Keras model from a local file (if ``run_id`` is None) or a run.\n Extra arguments are passed through to keras.load_model.\n\n :param model_uri: The location, in URI format, of the MLflow model, for example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n\n For more information about supported URI schemes, see the\n `Artifacts Documentation <https://www.mlflow.org/docs/latest/tracking.html#\n supported-artifact-stores>`_.\n\n :return: A Keras model instance.\n\n >>> # Load persisted model as a Keras model or as a PyFunc, call predict() on a Pandas DataFrame\n >>> keras_model = mlflow.keras.load_model(\"models\", run_id=\"96771d893a5e46159d9f3b49bf9013e2\")\n >>> predictions = keras_model.predict(x_test)\n \"\"\"\n local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)\n flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)\n # Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a\n # `data` key; in this case, we assume the model artifact path to be `model.h5`\n keras_model_artifacts_path = os.path.join(local_model_path, flavor_conf.get(\"data\", \"model.h5\"))\n return _load_model(model_file=keras_model_artifacts_path, **kwargs)\n", "path": "mlflow/keras.py"}]} | 3,566 | 581 |
gh_patches_debug_14905 | rasdani/github-patches | git_diff | refuel-ai__autolabel-423 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Confusing error message when OpenAI API Key is missing
**Describe the bug**
When the OpenAI API key is missing, we see the following error message:
```
File ~/.pyenv/versions/3.8.13/envs/refuel-main/lib/python3.8/site-packages/langchain/load/serializable.py:64, in Serializable.__init__(self, **kwargs)
63 def __init__(self, **kwargs: Any) -> None:
---> 64 super().__init__(**kwargs)
65 self._lc_kwargs = kwargs
File ~/.pyenv/versions/3.8.13/envs/refuel-main/lib/python3.8/site-packages/pydantic/main.py:341, in pydantic.main.BaseModel.__init__()
ValidationError: 1 validation error for ChatOpenAI
__root__
Did not find openai_api_key, please add an environment variable `OPENAI_API_KEY` which contains it, or pass `openai_api_key` as a named parameter. (type=value_error)
```
This is an error from `langchain`, but `autolabel` doesn't allow passing `openai_api_key` as a named parameter anywhere.
**To Reproduce**
Call `agent.plan` for a config that uses an OpenAI model without providing the API key as an env variable.
**Expected behavior**
We should throw an error that simply says that the user should add the API key as an environment variable and not that it should be a named parameter.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/autolabel/models/openai.py`
Content:
```
1 from functools import cached_property
2 from typing import List, Optional
3 import logging
4
5 from langchain.chat_models import ChatOpenAI
6 from langchain.llms import OpenAI
7 from langchain.schema import LLMResult, HumanMessage, Generation
8 import tiktoken
9
10 from autolabel.models import BaseModel
11 from autolabel.configs import AutolabelConfig
12 from autolabel.cache import BaseCache
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class OpenAILLM(BaseModel):
19 CHAT_ENGINE_MODELS = [
20 "gpt-3.5-turbo",
21 "gpt-3.5-turbo-0613",
22 "gpt-3.5-turbo-16k",
23 "gpt-3.5-turbo-16k-0613",
24 "gpt-4",
25 "gpt-4-0613",
26 "gpt-4-32k",
27 "gpt-4-32k-0613",
28 ]
29 MODELS_WITH_TOKEN_PROBS = ["text-curie-001", "text-davinci-003"]
30
31 # Default parameters for OpenAILLM
32 DEFAULT_MODEL = "gpt-3.5-turbo"
33 DEFAULT_PARAMS_COMPLETION_ENGINE = {
34 "max_tokens": 1000,
35 "temperature": 0.0,
36 "model_kwargs": {"logprobs": 1},
37 }
38 DEFAULT_PARAMS_CHAT_ENGINE = {
39 "max_tokens": 1000,
40 "temperature": 0.0,
41 }
42
43 # Reference: https://openai.com/pricing
44 COST_PER_PROMPT_TOKEN = {
45 "text-davinci-003": 0.02 / 1000,
46 "text-curie-001": 0.002 / 1000,
47 "gpt-3.5-turbo": 0.0015 / 1000,
48 "gpt-3.5-turbo-0613": 0.0015 / 1000,
49 "gpt-3.5-turbo-16k": 0.003 / 1000,
50 "gpt-3.5-turbo-16k-0613": 0.003 / 1000,
51 "gpt-4": 0.03 / 1000,
52 "gpt-4-0613": 0.03 / 1000,
53 "gpt-4-32k": 0.06 / 1000,
54 "gpt-4-32k-0613": 0.06 / 1000,
55 }
56 COST_PER_COMPLETION_TOKEN = {
57 "text-davinci-003": 0.02 / 1000,
58 "text-curie-001": 0.002 / 1000,
59 "gpt-3.5-turbo": 0.002 / 1000,
60 "gpt-3.5-turbo-0613": 0.002 / 1000,
61 "gpt-3.5-turbo-16k": 0.004 / 1000,
62 "gpt-3.5-turbo-16k-0613": 0.004 / 1000,
63 "gpt-4": 0.06 / 1000,
64 "gpt-4-0613": 0.06 / 1000,
65 "gpt-4-32k": 0.12 / 1000,
66 "gpt-4-32k-0613": 0.12 / 1000,
67 }
68
69 @cached_property
70 def _engine(self) -> str:
71 if self.model_name is not None and self.model_name in self.CHAT_ENGINE_MODELS:
72 return "chat"
73 else:
74 return "completion"
75
76 def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:
77 super().__init__(config, cache)
78 # populate model name
79 self.model_name = config.model_name() or self.DEFAULT_MODEL
80
81 # populate model params and initialize the LLM
82 model_params = config.model_params()
83 if config.logit_bias():
84 logit_bias = self._generate_logit_bias(config)
85 # if logit_bias or max_tokens is specified already, we don't want to overwrite it
86 model_params = {
87 **logit_bias,
88 **model_params,
89 }
90
91 if self._engine == "chat":
92 self.model_params = {**self.DEFAULT_PARAMS_CHAT_ENGINE, **model_params}
93 self.llm = ChatOpenAI(model_name=self.model_name, **self.model_params)
94 else:
95 self.model_params = {
96 **self.DEFAULT_PARAMS_COMPLETION_ENGINE,
97 **model_params,
98 }
99 self.llm = OpenAI(model_name=self.model_name, **self.model_params)
100
101 def _generate_logit_bias(self, config: AutolabelConfig) -> None:
102 """Generates logit bias for the labels specified in the config
103
104 Args:
105 config (AutolabelConfig): AutolabelConfig object
106
107 Returns:
108 Dict: logit bias and max tokens
109 """
110 if len(config.labels_list()) == 0:
111 logger.warning(
112 "No labels specified in the config. Skipping logit bias generation."
113 )
114 return {}
115 encoding = tiktoken.encoding_for_model(self.model_name)
116 logit_bias = {}
117 max_tokens = 0
118 for label in config.labels_list():
119 if label not in logit_bias:
120 tokens = encoding.encode(label)
121 for token in tokens:
122 logit_bias[token] = 100
123 max_tokens = max(max_tokens, len(tokens))
124
125 return {"logit_bias": logit_bias, "max_tokens": max_tokens}
126
127 def _label(self, prompts: List[str]) -> LLMResult:
128 if self._engine == "chat":
129 # Need to convert list[prompts] -> list[messages]
130 # Currently the entire prompt is stuck into the "human message"
131 # We might consider breaking this up into human vs system message in future
132 prompts = [[HumanMessage(content=prompt)] for prompt in prompts]
133 try:
134 return self.llm.generate(prompts)
135 except Exception as e:
136 print(f"Error generating from LLM: {e}, returning empty result")
137 generations = [[Generation(text="")] for _ in prompts]
138 return LLMResult(generations=generations)
139
140 def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
141 encoding = tiktoken.encoding_for_model(self.model_name)
142 num_prompt_toks = len(encoding.encode(prompt))
143 if label:
144 num_label_toks = len(encoding.encode(label))
145 else:
146 # get an upper bound
147 num_label_toks = self.model_params["max_tokens"]
148
149 cost_per_prompt_token = self.COST_PER_PROMPT_TOKEN[self.model_name]
150 cost_per_completion_token = self.COST_PER_COMPLETION_TOKEN[self.model_name]
151 return (num_prompt_toks * cost_per_prompt_token) + (
152 num_label_toks * cost_per_completion_token
153 )
154
155 def returns_token_probs(self) -> bool:
156 return (
157 self.model_name is not None
158 and self.model_name in self.MODELS_WITH_TOKEN_PROBS
159 )
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/autolabel/models/openai.py b/src/autolabel/models/openai.py
--- a/src/autolabel/models/openai.py
+++ b/src/autolabel/models/openai.py
@@ -11,6 +11,7 @@
from autolabel.configs import AutolabelConfig
from autolabel.cache import BaseCache
+import os
logger = logging.getLogger(__name__)
@@ -78,6 +79,9 @@
# populate model name
self.model_name = config.model_name() or self.DEFAULT_MODEL
+ if os.getenv("OPENAI_API_KEY") == None:
+ raise ValueError("OPENAI_API_KEY environment variable not set")
+
# populate model params and initialize the LLM
model_params = config.model_params()
if config.logit_bias():
| {"golden_diff": "diff --git a/src/autolabel/models/openai.py b/src/autolabel/models/openai.py\n--- a/src/autolabel/models/openai.py\n+++ b/src/autolabel/models/openai.py\n@@ -11,6 +11,7 @@\n from autolabel.configs import AutolabelConfig\n from autolabel.cache import BaseCache\n \n+import os\n \n logger = logging.getLogger(__name__)\n \n@@ -78,6 +79,9 @@\n # populate model name\n self.model_name = config.model_name() or self.DEFAULT_MODEL\n \n+ if os.getenv(\"OPENAI_API_KEY\") == None:\n+ raise ValueError(\"OPENAI_API_KEY environment variable not set\")\n+\n # populate model params and initialize the LLM\n model_params = config.model_params()\n if config.logit_bias():\n", "issue": "[Bug]: Confusing error message when OpenAI API Key is missing\n**Describe the bug**\r\nWhen the OpenAI API key is missing, we see the following error message:\r\n\r\n```\r\nFile ~/.pyenv/versions/3.8.13/envs/refuel-main/lib/python3.8/site-packages/langchain/load/serializable.py:64, in Serializable.__init__(self, **kwargs)\r\n 63 def __init__(self, **kwargs: Any) -> None:\r\n---> 64 super().__init__(**kwargs)\r\n 65 self._lc_kwargs = kwargs\r\n\r\nFile ~/.pyenv/versions/3.8.13/envs/refuel-main/lib/python3.8/site-packages/pydantic/main.py:341, in pydantic.main.BaseModel.__init__()\r\n\r\nValidationError: 1 validation error for ChatOpenAI\r\n__root__\r\n Did not find openai_api_key, please add an environment variable `OPENAI_API_KEY` which contains it, or pass `openai_api_key` as a named parameter. (type=value_error)\r\n```\r\n\r\nThis is an error from `langchain`, but `autolabel` doesn't allow passing `openai_api_key` as a named parameter anywhere. \r\n\r\n**To Reproduce**\r\nCall `agent.plan` for a config that uses an OpenAI model without providing the API key as an env variable. \r\n\r\n**Expected behavior**\r\nWe should throw an error that simply says that the user should add the API key as an environment variable and not that it should be a named parameter. 
\n", "before_files": [{"content": "from functools import cached_property\nfrom typing import List, Optional\nimport logging\n\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.llms import OpenAI\nfrom langchain.schema import LLMResult, HumanMessage, Generation\nimport tiktoken\n\nfrom autolabel.models import BaseModel\nfrom autolabel.configs import AutolabelConfig\nfrom autolabel.cache import BaseCache\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenAILLM(BaseModel):\n CHAT_ENGINE_MODELS = [\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-0613\",\n \"gpt-3.5-turbo-16k\",\n \"gpt-3.5-turbo-16k-0613\",\n \"gpt-4\",\n \"gpt-4-0613\",\n \"gpt-4-32k\",\n \"gpt-4-32k-0613\",\n ]\n MODELS_WITH_TOKEN_PROBS = [\"text-curie-001\", \"text-davinci-003\"]\n\n # Default parameters for OpenAILLM\n DEFAULT_MODEL = \"gpt-3.5-turbo\"\n DEFAULT_PARAMS_COMPLETION_ENGINE = {\n \"max_tokens\": 1000,\n \"temperature\": 0.0,\n \"model_kwargs\": {\"logprobs\": 1},\n }\n DEFAULT_PARAMS_CHAT_ENGINE = {\n \"max_tokens\": 1000,\n \"temperature\": 0.0,\n }\n\n # Reference: https://openai.com/pricing\n COST_PER_PROMPT_TOKEN = {\n \"text-davinci-003\": 0.02 / 1000,\n \"text-curie-001\": 0.002 / 1000,\n \"gpt-3.5-turbo\": 0.0015 / 1000,\n \"gpt-3.5-turbo-0613\": 0.0015 / 1000,\n \"gpt-3.5-turbo-16k\": 0.003 / 1000,\n \"gpt-3.5-turbo-16k-0613\": 0.003 / 1000,\n \"gpt-4\": 0.03 / 1000,\n \"gpt-4-0613\": 0.03 / 1000,\n \"gpt-4-32k\": 0.06 / 1000,\n \"gpt-4-32k-0613\": 0.06 / 1000,\n }\n COST_PER_COMPLETION_TOKEN = {\n \"text-davinci-003\": 0.02 / 1000,\n \"text-curie-001\": 0.002 / 1000,\n \"gpt-3.5-turbo\": 0.002 / 1000,\n \"gpt-3.5-turbo-0613\": 0.002 / 1000,\n \"gpt-3.5-turbo-16k\": 0.004 / 1000,\n \"gpt-3.5-turbo-16k-0613\": 0.004 / 1000,\n \"gpt-4\": 0.06 / 1000,\n \"gpt-4-0613\": 0.06 / 1000,\n \"gpt-4-32k\": 0.12 / 1000,\n \"gpt-4-32k-0613\": 0.12 / 1000,\n }\n\n @cached_property\n def _engine(self) -> str:\n if self.model_name is not None and self.model_name in self.CHAT_ENGINE_MODELS:\n return \"chat\"\n else:\n return \"completion\"\n\n def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:\n super().__init__(config, cache)\n # populate model name\n self.model_name = config.model_name() or self.DEFAULT_MODEL\n\n # populate model params and initialize the LLM\n model_params = config.model_params()\n if config.logit_bias():\n logit_bias = self._generate_logit_bias(config)\n # if logit_bias or max_tokens is specified already, we don't want to overwrite it\n model_params = {\n **logit_bias,\n **model_params,\n }\n\n if self._engine == \"chat\":\n self.model_params = {**self.DEFAULT_PARAMS_CHAT_ENGINE, **model_params}\n self.llm = ChatOpenAI(model_name=self.model_name, **self.model_params)\n else:\n self.model_params = {\n **self.DEFAULT_PARAMS_COMPLETION_ENGINE,\n **model_params,\n }\n self.llm = OpenAI(model_name=self.model_name, **self.model_params)\n\n def _generate_logit_bias(self, config: AutolabelConfig) -> None:\n \"\"\"Generates logit bias for the labels specified in the config\n\n Args:\n config (AutolabelConfig): AutolabelConfig object\n\n Returns:\n Dict: logit bias and max tokens\n \"\"\"\n if len(config.labels_list()) == 0:\n logger.warning(\n \"No labels specified in the config. 
Skipping logit bias generation.\"\n )\n return {}\n encoding = tiktoken.encoding_for_model(self.model_name)\n logit_bias = {}\n max_tokens = 0\n for label in config.labels_list():\n if label not in logit_bias:\n tokens = encoding.encode(label)\n for token in tokens:\n logit_bias[token] = 100\n max_tokens = max(max_tokens, len(tokens))\n\n return {\"logit_bias\": logit_bias, \"max_tokens\": max_tokens}\n\n def _label(self, prompts: List[str]) -> LLMResult:\n if self._engine == \"chat\":\n # Need to convert list[prompts] -> list[messages]\n # Currently the entire prompt is stuck into the \"human message\"\n # We might consider breaking this up into human vs system message in future\n prompts = [[HumanMessage(content=prompt)] for prompt in prompts]\n try:\n return self.llm.generate(prompts)\n except Exception as e:\n print(f\"Error generating from LLM: {e}, returning empty result\")\n generations = [[Generation(text=\"\")] for _ in prompts]\n return LLMResult(generations=generations)\n\n def get_cost(self, prompt: str, label: Optional[str] = \"\") -> float:\n encoding = tiktoken.encoding_for_model(self.model_name)\n num_prompt_toks = len(encoding.encode(prompt))\n if label:\n num_label_toks = len(encoding.encode(label))\n else:\n # get an upper bound\n num_label_toks = self.model_params[\"max_tokens\"]\n\n cost_per_prompt_token = self.COST_PER_PROMPT_TOKEN[self.model_name]\n cost_per_completion_token = self.COST_PER_COMPLETION_TOKEN[self.model_name]\n return (num_prompt_toks * cost_per_prompt_token) + (\n num_label_toks * cost_per_completion_token\n )\n\n def returns_token_probs(self) -> bool:\n return (\n self.model_name is not None\n and self.model_name in self.MODELS_WITH_TOKEN_PROBS\n )\n", "path": "src/autolabel/models/openai.py"}], "after_files": [{"content": "from functools import cached_property\nfrom typing import List, Optional\nimport logging\n\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.llms import OpenAI\nfrom langchain.schema import LLMResult, HumanMessage, Generation\nimport tiktoken\n\nfrom autolabel.models import BaseModel\nfrom autolabel.configs import AutolabelConfig\nfrom autolabel.cache import BaseCache\n\nimport os\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenAILLM(BaseModel):\n CHAT_ENGINE_MODELS = [\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-0613\",\n \"gpt-3.5-turbo-16k\",\n \"gpt-3.5-turbo-16k-0613\",\n \"gpt-4\",\n \"gpt-4-0613\",\n \"gpt-4-32k\",\n \"gpt-4-32k-0613\",\n ]\n MODELS_WITH_TOKEN_PROBS = [\"text-curie-001\", \"text-davinci-003\"]\n\n # Default parameters for OpenAILLM\n DEFAULT_MODEL = \"gpt-3.5-turbo\"\n DEFAULT_PARAMS_COMPLETION_ENGINE = {\n \"max_tokens\": 1000,\n \"temperature\": 0.0,\n \"model_kwargs\": {\"logprobs\": 1},\n }\n DEFAULT_PARAMS_CHAT_ENGINE = {\n \"max_tokens\": 1000,\n \"temperature\": 0.0,\n }\n\n # Reference: https://openai.com/pricing\n COST_PER_PROMPT_TOKEN = {\n \"text-davinci-003\": 0.02 / 1000,\n \"text-curie-001\": 0.002 / 1000,\n \"gpt-3.5-turbo\": 0.0015 / 1000,\n \"gpt-3.5-turbo-0613\": 0.0015 / 1000,\n \"gpt-3.5-turbo-16k\": 0.003 / 1000,\n \"gpt-3.5-turbo-16k-0613\": 0.003 / 1000,\n \"gpt-4\": 0.03 / 1000,\n \"gpt-4-0613\": 0.03 / 1000,\n \"gpt-4-32k\": 0.06 / 1000,\n \"gpt-4-32k-0613\": 0.06 / 1000,\n }\n COST_PER_COMPLETION_TOKEN = {\n \"text-davinci-003\": 0.02 / 1000,\n \"text-curie-001\": 0.002 / 1000,\n \"gpt-3.5-turbo\": 0.002 / 1000,\n \"gpt-3.5-turbo-0613\": 0.002 / 1000,\n \"gpt-3.5-turbo-16k\": 0.004 / 1000,\n \"gpt-3.5-turbo-16k-0613\": 0.004 / 1000,\n \"gpt-4\": 0.06 / 1000,\n 
\"gpt-4-0613\": 0.06 / 1000,\n \"gpt-4-32k\": 0.12 / 1000,\n \"gpt-4-32k-0613\": 0.12 / 1000,\n }\n\n @cached_property\n def _engine(self) -> str:\n if self.model_name is not None and self.model_name in self.CHAT_ENGINE_MODELS:\n return \"chat\"\n else:\n return \"completion\"\n\n def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:\n super().__init__(config, cache)\n # populate model name\n self.model_name = config.model_name() or self.DEFAULT_MODEL\n\n if os.getenv(\"OPENAI_API_KEY\") == None:\n raise ValueError(\"OPENAI_API_KEY environment variable not set\")\n\n # populate model params and initialize the LLM\n model_params = config.model_params()\n if config.logit_bias():\n logit_bias = self._generate_logit_bias(config)\n # if logit_bias or max_tokens is specified already, we don't want to overwrite it\n model_params = {\n **logit_bias,\n **model_params,\n }\n\n if self._engine == \"chat\":\n self.model_params = {**self.DEFAULT_PARAMS_CHAT_ENGINE, **model_params}\n self.llm = ChatOpenAI(model_name=self.model_name, **self.model_params)\n else:\n self.model_params = {\n **self.DEFAULT_PARAMS_COMPLETION_ENGINE,\n **model_params,\n }\n self.llm = OpenAI(model_name=self.model_name, **self.model_params)\n\n def _generate_logit_bias(self, config: AutolabelConfig) -> None:\n \"\"\"Generates logit bias for the labels specified in the config\n\n Args:\n config (AutolabelConfig): AutolabelConfig object\n\n Returns:\n Dict: logit bias and max tokens\n \"\"\"\n if len(config.labels_list()) == 0:\n logger.warning(\n \"No labels specified in the config. Skipping logit bias generation.\"\n )\n return {}\n encoding = tiktoken.encoding_for_model(self.model_name)\n logit_bias = {}\n max_tokens = 0\n for label in config.labels_list():\n if label not in logit_bias:\n tokens = encoding.encode(label)\n for token in tokens:\n logit_bias[token] = 100\n max_tokens = max(max_tokens, len(tokens))\n\n return {\"logit_bias\": logit_bias, \"max_tokens\": max_tokens}\n\n def _label(self, prompts: List[str]) -> LLMResult:\n if self._engine == \"chat\":\n # Need to convert list[prompts] -> list[messages]\n # Currently the entire prompt is stuck into the \"human message\"\n # We might consider breaking this up into human vs system message in future\n prompts = [[HumanMessage(content=prompt)] for prompt in prompts]\n try:\n return self.llm.generate(prompts)\n except Exception as e:\n print(f\"Error generating from LLM: {e}, returning empty result\")\n generations = [[Generation(text=\"\")] for _ in prompts]\n return LLMResult(generations=generations)\n\n def get_cost(self, prompt: str, label: Optional[str] = \"\") -> float:\n encoding = tiktoken.encoding_for_model(self.model_name)\n num_prompt_toks = len(encoding.encode(prompt))\n if label:\n num_label_toks = len(encoding.encode(label))\n else:\n # get an upper bound\n num_label_toks = self.model_params[\"max_tokens\"]\n\n cost_per_prompt_token = self.COST_PER_PROMPT_TOKEN[self.model_name]\n cost_per_completion_token = self.COST_PER_COMPLETION_TOKEN[self.model_name]\n return (num_prompt_toks * cost_per_prompt_token) + (\n num_label_toks * cost_per_completion_token\n )\n\n def returns_token_probs(self) -> bool:\n return (\n self.model_name is not None\n and self.model_name in self.MODELS_WITH_TOKEN_PROBS\n )\n", "path": "src/autolabel/models/openai.py"}]} | 2,667 | 180 |
gh_patches_debug_29585 | rasdani/github-patches | git_diff | gratipay__gratipay.com-2714 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dollar amounts no longer showing two decimals
Presumably a regression from recent i18n work.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/utils/i18n.py`
Content:
```
1 from __future__ import print_function, unicode_literals
2
3 from io import BytesIO
4 import os
5 import re
6 from unicodedata import combining, normalize
7
8 from aspen.resources.pagination import parse_specline, split_and_escape
9 from aspen.utils import utcnow
10 from babel.core import Locale
11 from babel.dates import format_timedelta
12 from babel.messages.pofile import Catalog, read_po
13 from babel.messages.extract import extract_python
14 from babel.numbers import (
15 format_currency, format_decimal, format_number, format_percent,
16 get_decimal_symbol, parse_decimal, parse_pattern
17 )
18 import jinja2.ext
19
20 from gratipay.utils import COUNTRIES, COUNTRIES_MAP
21
22
23 ternary_re = re.compile(r'^\(? *(.+?) *\? *(.+?) *: *(.+?) *\)?$')
24 and_re = re.compile(r' *&& *')
25 or_re = re.compile(r' *\|\| *')
26
27
28 def ternary_sub(m):
29 g1, g2, g3 = m.groups()
30 return '%s if %s else %s' % (g2, g1, ternary_re.sub(ternary_sub, g3))
31
32
33 def get_function_from_rule(rule):
34 rule = ternary_re.sub(ternary_sub, rule.strip())
35 rule = and_re.sub(' and ', rule)
36 rule = or_re.sub(' or ', rule)
37 return eval('lambda n: ' + rule, {'__builtins__': {}})
38
39
40 def get_text(request, loc, s, *a, **kw):
41 msg = loc.catalog.get(s)
42 if msg:
43 s = msg.string or s
44 if a or kw:
45 if isinstance(s, bytes):
46 s = s.decode('ascii')
47 return s.format(*a, **kw)
48 return s
49
50
51 def n_get_text(request, loc, s, p, n, *a, **kw):
52 n = n or 0
53 msg = loc.catalog.get((s, p))
54 s2 = None
55 if msg:
56 try:
57 s2 = msg.string[loc.catalog.plural_func(n)]
58 except Exception as e:
59 request.website.tell_sentry(e, request)
60 if s2 is None:
61 loc = 'en'
62 s2 = s if n == 1 else p
63 kw['n'] = format_number(n, locale=loc) or n
64 if isinstance(s2, bytes):
65 s2 = s2.decode('ascii')
66 return s2.format(*a, **kw)
67
68
69 def to_age(dt, loc):
70 return format_timedelta(dt - utcnow(), add_direction=True, locale=loc)
71
72
73 def regularize_locale(loc):
74 return loc.split('-', 1)[0].lower()
75
76
77 def strip_accents(s):
78 return ''.join(c for c in normalize('NFKD', s) if not combining(c))
79
80
81 def load_langs(localeDir):
82 key = lambda t: strip_accents(t[1])
83 langs = {}
84 for file in os.listdir(localeDir):
85 parts = file.split(".")
86 if len(parts) == 2 and parts[1] == "po":
87 lang = regularize_locale(parts[0])
88 with open(os.path.join(localeDir, file)) as f:
89 l = langs[lang] = Locale(lang)
90 c = l.catalog = read_po(f)
91 c.plural_func = get_function_from_rule(c.plural_expr)
92 try:
93 l.countries_map = {k: l.territories[k] for k in COUNTRIES_MAP}
94 l.countries = sorted(l.countries_map.items(), key=key)
95 except KeyError:
96 l.countries_map = COUNTRIES_MAP
97 l.countries = COUNTRIES
98 return langs
99
100
101 # Load the locales
102 LOCALES = load_langs("i18n")
103
104 # Add the default English locale
105 LOCALE_EN = LOCALES['en'] = Locale('en')
106 LOCALE_EN.catalog = Catalog('en')
107 LOCALE_EN.catalog.plural_func = lambda n: n != 1
108 LOCALE_EN.countries = COUNTRIES
109 LOCALE_EN.countries_map = COUNTRIES_MAP
110
111 # Patch the locales to look less formal
112 LOCALE_EN.currency_formats[None] = parse_pattern('\xa4#,##0.##')
113 LOCALES['fr'].currency_formats[None] = parse_pattern('#,##0.##\u202f\xa4')
114 LOCALES['fr'].currency_symbols['USD'] = '$'
115
116
117 def get_locale_for_request(request):
118 accept_lang = request.headers.get("Accept-Language", "")
119 languages = (lang.split(";", 1)[0] for lang in accept_lang.split(","))
120 for lang in languages:
121 lang = regularize_locale(lang)
122 loc = LOCALES.get(lang)
123 if loc:
124 return loc
125 return LOCALE_EN
126
127
128 def inbound(request):
129 context = request.context
130 loc = context.locale = get_locale_for_request(request)
131 context.decimal_symbol = get_decimal_symbol(locale=loc)
132 context._ = lambda s, *a, **kw: get_text(request, loc, s, *a, **kw)
133 context.ngettext = lambda *a, **kw: n_get_text(request, loc, *a, **kw)
134 context.format_number = lambda *a: format_number(*a, locale=loc)
135 context.format_decimal = lambda *a: format_decimal(*a, locale=loc)
136 context.format_currency = lambda *a: format_currency(*a, locale=loc)
137 context.format_percent = lambda *a: format_percent(*a, locale=loc)
138 context.parse_decimal = lambda *a: parse_decimal(*a, locale=loc)
139 def _to_age(delta):
140 try:
141 return to_age(delta, loc)
142 except:
143 return to_age(delta, 'en')
144 context.to_age = _to_age
145
146
147 def extract_spt(fileobj, *args, **kw):
148 pages = list(split_and_escape(fileobj.read()))
149 npages = len(pages)
150 for i, page in enumerate(pages, 1):
151 f = BytesIO(b'\n' * page.offset + page.content)
152 content_type, renderer = parse_specline(page.header)
153 extractor = None
154 if (i == npages and not page.header) or content_type == 'text/html' or renderer == 'jinja2':
155 extractor = jinja2.ext.babel_extract
156 elif i < 3:
157 extractor = extract_python
158 if extractor:
159 for match in extractor(f, *args, **kw):
160 yield match
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gratipay/utils/i18n.py b/gratipay/utils/i18n.py
--- a/gratipay/utils/i18n.py
+++ b/gratipay/utils/i18n.py
@@ -109,8 +109,7 @@
LOCALE_EN.countries_map = COUNTRIES_MAP
# Patch the locales to look less formal
-LOCALE_EN.currency_formats[None] = parse_pattern('\xa4#,##0.##')
-LOCALES['fr'].currency_formats[None] = parse_pattern('#,##0.##\u202f\xa4')
+LOCALES['fr'].currency_formats[None] = parse_pattern('#,##0.00\u202f\xa4')
LOCALES['fr'].currency_symbols['USD'] = '$'
@@ -125,6 +124,13 @@
return LOCALE_EN
+def format_currency_with_options(number, currency, locale=LOCALE_EN, trailing_zeroes=True):
+ s = format_currency(number, currency, locale=locale)
+ if not trailing_zeroes:
+ s = s.replace(get_decimal_symbol(locale)+'00', '')
+ return s
+
+
def inbound(request):
context = request.context
loc = context.locale = get_locale_for_request(request)
@@ -133,7 +139,7 @@
context.ngettext = lambda *a, **kw: n_get_text(request, loc, *a, **kw)
context.format_number = lambda *a: format_number(*a, locale=loc)
context.format_decimal = lambda *a: format_decimal(*a, locale=loc)
- context.format_currency = lambda *a: format_currency(*a, locale=loc)
+ context.format_currency = lambda *a, **kw: format_currency_with_options(*a, locale=loc, **kw)
context.format_percent = lambda *a: format_percent(*a, locale=loc)
context.parse_decimal = lambda *a: parse_decimal(*a, locale=loc)
def _to_age(delta):
| {"golden_diff": "diff --git a/gratipay/utils/i18n.py b/gratipay/utils/i18n.py\n--- a/gratipay/utils/i18n.py\n+++ b/gratipay/utils/i18n.py\n@@ -109,8 +109,7 @@\n LOCALE_EN.countries_map = COUNTRIES_MAP\n \n # Patch the locales to look less formal\n-LOCALE_EN.currency_formats[None] = parse_pattern('\\xa4#,##0.##')\n-LOCALES['fr'].currency_formats[None] = parse_pattern('#,##0.##\\u202f\\xa4')\n+LOCALES['fr'].currency_formats[None] = parse_pattern('#,##0.00\\u202f\\xa4')\n LOCALES['fr'].currency_symbols['USD'] = '$'\n \n \n@@ -125,6 +124,13 @@\n return LOCALE_EN\n \n \n+def format_currency_with_options(number, currency, locale=LOCALE_EN, trailing_zeroes=True):\n+ s = format_currency(number, currency, locale=locale)\n+ if not trailing_zeroes:\n+ s = s.replace(get_decimal_symbol(locale)+'00', '')\n+ return s\n+\n+\n def inbound(request):\n context = request.context\n loc = context.locale = get_locale_for_request(request)\n@@ -133,7 +139,7 @@\n context.ngettext = lambda *a, **kw: n_get_text(request, loc, *a, **kw)\n context.format_number = lambda *a: format_number(*a, locale=loc)\n context.format_decimal = lambda *a: format_decimal(*a, locale=loc)\n- context.format_currency = lambda *a: format_currency(*a, locale=loc)\n+ context.format_currency = lambda *a, **kw: format_currency_with_options(*a, locale=loc, **kw)\n context.format_percent = lambda *a: format_percent(*a, locale=loc)\n context.parse_decimal = lambda *a: parse_decimal(*a, locale=loc)\n def _to_age(delta):\n", "issue": "dollar amounts no longer showing two decimals\nPresumably a regression from recent i18n work.\n\n\n\n", "before_files": [{"content": "from __future__ import print_function, unicode_literals\n\nfrom io import BytesIO\nimport os\nimport re\nfrom unicodedata import combining, normalize\n\nfrom aspen.resources.pagination import parse_specline, split_and_escape\nfrom aspen.utils import utcnow\nfrom babel.core import Locale\nfrom babel.dates import format_timedelta\nfrom babel.messages.pofile import Catalog, read_po\nfrom babel.messages.extract import extract_python\nfrom babel.numbers import (\n format_currency, format_decimal, format_number, format_percent,\n get_decimal_symbol, parse_decimal, parse_pattern\n)\nimport jinja2.ext\n\nfrom gratipay.utils import COUNTRIES, COUNTRIES_MAP\n\n\nternary_re = re.compile(r'^\\(? *(.+?) *\\? *(.+?) *: *(.+?) 
*\\)?$')\nand_re = re.compile(r' *&& *')\nor_re = re.compile(r' *\\|\\| *')\n\n\ndef ternary_sub(m):\n g1, g2, g3 = m.groups()\n return '%s if %s else %s' % (g2, g1, ternary_re.sub(ternary_sub, g3))\n\n\ndef get_function_from_rule(rule):\n rule = ternary_re.sub(ternary_sub, rule.strip())\n rule = and_re.sub(' and ', rule)\n rule = or_re.sub(' or ', rule)\n return eval('lambda n: ' + rule, {'__builtins__': {}})\n\n\ndef get_text(request, loc, s, *a, **kw):\n msg = loc.catalog.get(s)\n if msg:\n s = msg.string or s\n if a or kw:\n if isinstance(s, bytes):\n s = s.decode('ascii')\n return s.format(*a, **kw)\n return s\n\n\ndef n_get_text(request, loc, s, p, n, *a, **kw):\n n = n or 0\n msg = loc.catalog.get((s, p))\n s2 = None\n if msg:\n try:\n s2 = msg.string[loc.catalog.plural_func(n)]\n except Exception as e:\n request.website.tell_sentry(e, request)\n if s2 is None:\n loc = 'en'\n s2 = s if n == 1 else p\n kw['n'] = format_number(n, locale=loc) or n\n if isinstance(s2, bytes):\n s2 = s2.decode('ascii')\n return s2.format(*a, **kw)\n\n\ndef to_age(dt, loc):\n return format_timedelta(dt - utcnow(), add_direction=True, locale=loc)\n\n\ndef regularize_locale(loc):\n return loc.split('-', 1)[0].lower()\n\n\ndef strip_accents(s):\n return ''.join(c for c in normalize('NFKD', s) if not combining(c))\n\n\ndef load_langs(localeDir):\n key = lambda t: strip_accents(t[1])\n langs = {}\n for file in os.listdir(localeDir):\n parts = file.split(\".\")\n if len(parts) == 2 and parts[1] == \"po\":\n lang = regularize_locale(parts[0])\n with open(os.path.join(localeDir, file)) as f:\n l = langs[lang] = Locale(lang)\n c = l.catalog = read_po(f)\n c.plural_func = get_function_from_rule(c.plural_expr)\n try:\n l.countries_map = {k: l.territories[k] for k in COUNTRIES_MAP}\n l.countries = sorted(l.countries_map.items(), key=key)\n except KeyError:\n l.countries_map = COUNTRIES_MAP\n l.countries = COUNTRIES\n return langs\n\n\n# Load the locales\nLOCALES = load_langs(\"i18n\")\n\n# Add the default English locale\nLOCALE_EN = LOCALES['en'] = Locale('en')\nLOCALE_EN.catalog = Catalog('en')\nLOCALE_EN.catalog.plural_func = lambda n: n != 1\nLOCALE_EN.countries = COUNTRIES\nLOCALE_EN.countries_map = COUNTRIES_MAP\n\n# Patch the locales to look less formal\nLOCALE_EN.currency_formats[None] = parse_pattern('\\xa4#,##0.##')\nLOCALES['fr'].currency_formats[None] = parse_pattern('#,##0.##\\u202f\\xa4')\nLOCALES['fr'].currency_symbols['USD'] = '$'\n\n\ndef get_locale_for_request(request):\n accept_lang = request.headers.get(\"Accept-Language\", \"\")\n languages = (lang.split(\";\", 1)[0] for lang in accept_lang.split(\",\"))\n for lang in languages:\n lang = regularize_locale(lang)\n loc = LOCALES.get(lang)\n if loc:\n return loc\n return LOCALE_EN\n\n\ndef inbound(request):\n context = request.context\n loc = context.locale = get_locale_for_request(request)\n context.decimal_symbol = get_decimal_symbol(locale=loc)\n context._ = lambda s, *a, **kw: get_text(request, loc, s, *a, **kw)\n context.ngettext = lambda *a, **kw: n_get_text(request, loc, *a, **kw)\n context.format_number = lambda *a: format_number(*a, locale=loc)\n context.format_decimal = lambda *a: format_decimal(*a, locale=loc)\n context.format_currency = lambda *a: format_currency(*a, locale=loc)\n context.format_percent = lambda *a: format_percent(*a, locale=loc)\n context.parse_decimal = lambda *a: parse_decimal(*a, locale=loc)\n def _to_age(delta):\n try:\n return to_age(delta, loc)\n except:\n return to_age(delta, 'en')\n context.to_age = 
_to_age\n\n\ndef extract_spt(fileobj, *args, **kw):\n pages = list(split_and_escape(fileobj.read()))\n npages = len(pages)\n for i, page in enumerate(pages, 1):\n f = BytesIO(b'\\n' * page.offset + page.content)\n content_type, renderer = parse_specline(page.header)\n extractor = None\n if (i == npages and not page.header) or content_type == 'text/html' or renderer == 'jinja2':\n extractor = jinja2.ext.babel_extract\n elif i < 3:\n extractor = extract_python\n if extractor:\n for match in extractor(f, *args, **kw):\n yield match\n", "path": "gratipay/utils/i18n.py"}], "after_files": [{"content": "from __future__ import print_function, unicode_literals\n\nfrom io import BytesIO\nimport os\nimport re\nfrom unicodedata import combining, normalize\n\nfrom aspen.resources.pagination import parse_specline, split_and_escape\nfrom aspen.utils import utcnow\nfrom babel.core import Locale\nfrom babel.dates import format_timedelta\nfrom babel.messages.pofile import Catalog, read_po\nfrom babel.messages.extract import extract_python\nfrom babel.numbers import (\n format_currency, format_decimal, format_number, format_percent,\n get_decimal_symbol, parse_decimal, parse_pattern\n)\nimport jinja2.ext\n\nfrom gratipay.utils import COUNTRIES, COUNTRIES_MAP\n\n\nternary_re = re.compile(r'^\\(? *(.+?) *\\? *(.+?) *: *(.+?) *\\)?$')\nand_re = re.compile(r' *&& *')\nor_re = re.compile(r' *\\|\\| *')\n\n\ndef ternary_sub(m):\n g1, g2, g3 = m.groups()\n return '%s if %s else %s' % (g2, g1, ternary_re.sub(ternary_sub, g3))\n\n\ndef get_function_from_rule(rule):\n rule = ternary_re.sub(ternary_sub, rule.strip())\n rule = and_re.sub(' and ', rule)\n rule = or_re.sub(' or ', rule)\n return eval('lambda n: ' + rule, {'__builtins__': {}})\n\n\ndef get_text(request, loc, s, *a, **kw):\n msg = loc.catalog.get(s)\n if msg:\n s = msg.string or s\n if a or kw:\n if isinstance(s, bytes):\n s = s.decode('ascii')\n return s.format(*a, **kw)\n return s\n\n\ndef n_get_text(request, loc, s, p, n, *a, **kw):\n n = n or 0\n msg = loc.catalog.get((s, p))\n s2 = None\n if msg:\n try:\n s2 = msg.string[loc.catalog.plural_func(n)]\n except Exception as e:\n request.website.tell_sentry(e, request)\n if s2 is None:\n loc = 'en'\n s2 = s if n == 1 else p\n kw['n'] = format_number(n, locale=loc) or n\n if isinstance(s2, bytes):\n s2 = s2.decode('ascii')\n return s2.format(*a, **kw)\n\n\ndef to_age(dt, loc):\n return format_timedelta(dt - utcnow(), add_direction=True, locale=loc)\n\n\ndef regularize_locale(loc):\n return loc.split('-', 1)[0].lower()\n\n\ndef strip_accents(s):\n return ''.join(c for c in normalize('NFKD', s) if not combining(c))\n\n\ndef load_langs(localeDir):\n key = lambda t: strip_accents(t[1])\n langs = {}\n for file in os.listdir(localeDir):\n parts = file.split(\".\")\n if len(parts) == 2 and parts[1] == \"po\":\n lang = regularize_locale(parts[0])\n with open(os.path.join(localeDir, file)) as f:\n l = langs[lang] = Locale(lang)\n c = l.catalog = read_po(f)\n c.plural_func = get_function_from_rule(c.plural_expr)\n try:\n l.countries_map = {k: l.territories[k] for k in COUNTRIES_MAP}\n l.countries = sorted(l.countries_map.items(), key=key)\n except KeyError:\n l.countries_map = COUNTRIES_MAP\n l.countries = COUNTRIES\n return langs\n\n\n# Load the locales\nLOCALES = load_langs(\"i18n\")\n\n# Add the default English locale\nLOCALE_EN = LOCALES['en'] = Locale('en')\nLOCALE_EN.catalog = Catalog('en')\nLOCALE_EN.catalog.plural_func = lambda n: n != 1\nLOCALE_EN.countries = COUNTRIES\nLOCALE_EN.countries_map = 
COUNTRIES_MAP\n\n# Patch the locales to look less formal\nLOCALES['fr'].currency_formats[None] = parse_pattern('#,##0.00\\u202f\\xa4')\nLOCALES['fr'].currency_symbols['USD'] = '$'\n\n\ndef get_locale_for_request(request):\n accept_lang = request.headers.get(\"Accept-Language\", \"\")\n languages = (lang.split(\";\", 1)[0] for lang in accept_lang.split(\",\"))\n for lang in languages:\n lang = regularize_locale(lang)\n loc = LOCALES.get(lang)\n if loc:\n return loc\n return LOCALE_EN\n\n\ndef format_currency_with_options(number, currency, locale=LOCALE_EN, trailing_zeroes=True):\n s = format_currency(number, currency, locale=locale)\n if not trailing_zeroes:\n s = s.replace(get_decimal_symbol(locale)+'00', '')\n return s\n\n\ndef inbound(request):\n context = request.context\n loc = context.locale = get_locale_for_request(request)\n context.decimal_symbol = get_decimal_symbol(locale=loc)\n context._ = lambda s, *a, **kw: get_text(request, loc, s, *a, **kw)\n context.ngettext = lambda *a, **kw: n_get_text(request, loc, *a, **kw)\n context.format_number = lambda *a: format_number(*a, locale=loc)\n context.format_decimal = lambda *a: format_decimal(*a, locale=loc)\n context.format_currency = lambda *a, **kw: format_currency_with_options(*a, locale=loc, **kw)\n context.format_percent = lambda *a: format_percent(*a, locale=loc)\n context.parse_decimal = lambda *a: parse_decimal(*a, locale=loc)\n def _to_age(delta):\n try:\n return to_age(delta, loc)\n except:\n return to_age(delta, 'en')\n context.to_age = _to_age\n\n\ndef extract_spt(fileobj, *args, **kw):\n pages = list(split_and_escape(fileobj.read()))\n npages = len(pages)\n for i, page in enumerate(pages, 1):\n f = BytesIO(b'\\n' * page.offset + page.content)\n content_type, renderer = parse_specline(page.header)\n extractor = None\n if (i == npages and not page.header) or content_type == 'text/html' or renderer == 'jinja2':\n extractor = jinja2.ext.babel_extract\n elif i < 3:\n extractor = extract_python\n if extractor:\n for match in extractor(f, *args, **kw):\n yield match\n", "path": "gratipay/utils/i18n.py"}]} | 2,155 | 452 |