problem_id (stringlengths 18–22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13–58) | prompt (stringlengths 1.1k–25.4k) | golden_diff (stringlengths 145–5.13k) | verification_info (stringlengths 582–39.1k) | num_tokens (int64 271–4.1k) | num_tokens_diff (int64 47–1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_1246 | rasdani/github-patches | git_diff | getsentry__sentry-15491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Simple typo in the compact docstring for utils.functional
## Important Details
How are you running Sentry?
* [ ] On-Premise docker [Version xyz]
* [ ] Saas (sentry.io)
* [x] Other [briefly describe your environment]
Observed documentation - not running sentry.
## Description
Simple typo should be values rather than valules.
## Steps to Reproduce
1. Observe docstring in utils.functional.compact method
### What you expected to happen
Should be values rather than valules.
### Possible Solution
Replace valules with values.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/utils/functional.py`
Content:
```
1 from __future__ import absolute_import
2
3 import six
4
5 from django.utils.functional import empty
6
7
8 def extract_lazy_object(lo):
9 """
10 Unwrap a LazyObject and return the inner object. Whatever that may be.
11
12 ProTip: This is relying on `django.utils.functional.empty`, which may
13 or may not be removed in the future. It's 100% undocumented.
14 """
15 if not hasattr(lo, "_wrapped"):
16 return lo
17 if lo._wrapped is empty:
18 lo._setup()
19 return lo._wrapped
20
21
22 def apply_values(function, mapping):
23 """\
24 Applies ``function`` to a sequence containing all of the values in the
25 provided mapping, returing a new mapping with the values replaced with
26 the results of the provided function.
27
28 >>> apply_values(
29 ... lambda values: map(u'{} fish'.format, values),
30 ... {1: 'red', 2: 'blue'},
31 ... )
32 {1: u'red fish', 2: u'blue fish'}
33 """
34 if not mapping:
35 return {}
36
37 keys, values = zip(*mapping.items())
38 return dict(zip(keys, function(values)))
39
40
41 def compact(seq):
42 """
43 Removes ``None`` values from various sequence-based data structures.
44
45 dict:
46 Removes keys with a corresponding ``None`` value.
47
48 list:
49 Removes ``None`` valules.
50
51 >>> compact({'foo': 'bar', 'baz': None})
52 {'foo': 'bar'}
53
54 >>> compact([1, None, 2])
55 [1, 2]
56 """
57 if isinstance(seq, dict):
58 return {k: v for k, v in six.iteritems(seq) if v is not None}
59
60 elif isinstance(seq, list):
61 return [k for k in seq if k is not None]
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/utils/functional.py b/src/sentry/utils/functional.py
--- a/src/sentry/utils/functional.py
+++ b/src/sentry/utils/functional.py
@@ -46,7 +46,7 @@
Removes keys with a corresponding ``None`` value.
list:
- Removes ``None`` valules.
+ Removes ``None`` values.
>>> compact({'foo': 'bar', 'baz': None})
{'foo': 'bar'}
| {"golden_diff": "diff --git a/src/sentry/utils/functional.py b/src/sentry/utils/functional.py\n--- a/src/sentry/utils/functional.py\n+++ b/src/sentry/utils/functional.py\n@@ -46,7 +46,7 @@\n Removes keys with a corresponding ``None`` value.\n \n list:\n- Removes ``None`` valules.\n+ Removes ``None`` values.\n \n >>> compact({'foo': 'bar', 'baz': None})\n {'foo': 'bar'}\n", "issue": "Simple typo in the compact docstring for utils.functional\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n* [ ] On-Premise docker [Version xyz]\r\n* [ ] Saas (sentry.io)\r\n* [x] Other [briefly describe your environment]\r\nObserved documentation - not running sentry.\r\n\r\n## Description\r\n\r\nSimple typo should be values rather than valules.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Observe docstring in utils.functional.compact method\r\n\r\n### What you expected to happen\r\n\r\nShould be values rather than valules.\r\n\r\n### Possible Solution\r\n\r\nReplace valules with values.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom django.utils.functional import empty\n\n\ndef extract_lazy_object(lo):\n \"\"\"\n Unwrap a LazyObject and return the inner object. Whatever that may be.\n\n ProTip: This is relying on `django.utils.functional.empty`, which may\n or may not be removed in the future. It's 100% undocumented.\n \"\"\"\n if not hasattr(lo, \"_wrapped\"):\n return lo\n if lo._wrapped is empty:\n lo._setup()\n return lo._wrapped\n\n\ndef apply_values(function, mapping):\n \"\"\"\\\n Applies ``function`` to a sequence containing all of the values in the\n provided mapping, returing a new mapping with the values replaced with\n the results of the provided function.\n\n >>> apply_values(\n ... lambda values: map(u'{} fish'.format, values),\n ... {1: 'red', 2: 'blue'},\n ... )\n {1: u'red fish', 2: u'blue fish'}\n \"\"\"\n if not mapping:\n return {}\n\n keys, values = zip(*mapping.items())\n return dict(zip(keys, function(values)))\n\n\ndef compact(seq):\n \"\"\"\n Removes ``None`` values from various sequence-based data structures.\n\n dict:\n Removes keys with a corresponding ``None`` value.\n\n list:\n Removes ``None`` valules.\n\n >>> compact({'foo': 'bar', 'baz': None})\n {'foo': 'bar'}\n\n >>> compact([1, None, 2])\n [1, 2]\n \"\"\"\n if isinstance(seq, dict):\n return {k: v for k, v in six.iteritems(seq) if v is not None}\n\n elif isinstance(seq, list):\n return [k for k in seq if k is not None]\n", "path": "src/sentry/utils/functional.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom django.utils.functional import empty\n\n\ndef extract_lazy_object(lo):\n \"\"\"\n Unwrap a LazyObject and return the inner object. Whatever that may be.\n\n ProTip: This is relying on `django.utils.functional.empty`, which may\n or may not be removed in the future. It's 100% undocumented.\n \"\"\"\n if not hasattr(lo, \"_wrapped\"):\n return lo\n if lo._wrapped is empty:\n lo._setup()\n return lo._wrapped\n\n\ndef apply_values(function, mapping):\n \"\"\"\\\n Applies ``function`` to a sequence containing all of the values in the\n provided mapping, returing a new mapping with the values replaced with\n the results of the provided function.\n\n >>> apply_values(\n ... lambda values: map(u'{} fish'.format, values),\n ... {1: 'red', 2: 'blue'},\n ... 
)\n {1: u'red fish', 2: u'blue fish'}\n \"\"\"\n if not mapping:\n return {}\n\n keys, values = zip(*mapping.items())\n return dict(zip(keys, function(values)))\n\n\ndef compact(seq):\n \"\"\"\n Removes ``None`` values from various sequence-based data structures.\n\n dict:\n Removes keys with a corresponding ``None`` value.\n\n list:\n Removes ``None`` values.\n\n >>> compact({'foo': 'bar', 'baz': None})\n {'foo': 'bar'}\n\n >>> compact([1, None, 2])\n [1, 2]\n \"\"\"\n if isinstance(seq, dict):\n return {k: v for k, v in six.iteritems(seq) if v is not None}\n\n elif isinstance(seq, list):\n return [k for k in seq if k is not None]\n", "path": "src/sentry/utils/functional.py"}]} | 897 | 106 |
gh_patches_debug_20776 | rasdani/github-patches | git_diff | google__turbinia-1110 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Config module 'imp' deprecation warning
Currently, the config module uses a deprecated library method to load the config file into a module which causes a DeprecationWarning
```============================================= warnings summary =============================================
turbinia/config/__init__.py:19
/workspaces/turbinia/turbinia/config/__init__.py:19: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
import imp
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/config/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2016 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Basic Turbinia config."""
16
17 from __future__ import unicode_literals
18
19 import imp
20 import itertools
21 import logging
22 import os
23 import sys
24
25 from turbinia import TurbiniaException
26
27 DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
28
29 # Look for config files with these names
30 CONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config_tmpl.py']
31 # Look in homedir first, then /etc/turbinia
32 CONFIGPATH = [
33 os.path.expanduser('~'),
34 '/etc/turbinia',
35 os.path.dirname(os.path.abspath(__file__)),
36 ]
37 # Config setup reminder for cleaner error handling on empty configs.
38 CONFIG_MSG = (
39 'Copy turbinia/config/turbinia_config_tmpl.py to ~/.turbiniarc '
40 'or /etc/turbinia/turbinia.conf, edit, and re-run.')
41
42 # Required config vars
43 REQUIRED_VARS = [
44 # Turbinia Config
45 'INSTANCE_ID',
46 'STATE_MANAGER',
47 'TASK_MANAGER',
48 'LOG_DIR',
49 'LOCK_FILE',
50 'TMP_RESOURCE_DIR',
51 'RESOURCE_FILE',
52 'RESOURCE_FILE_LOCK',
53 'SCALEDOWN_WORKER_FILE',
54 'OUTPUT_DIR',
55 'TMP_DIR',
56 'SLEEP_TIME',
57 'SINGLE_RUN',
58 'MOUNT_DIR_PREFIX',
59 'SHARED_FILESYSTEM',
60 'DEBUG_TASKS',
61 'DEPENDENCIES',
62 'DOCKER_ENABLED',
63 'DISABLED_JOBS',
64 ]
65
66 # Optional config vars. Some may be mandatory depending on the configuration
67 # (e.g. if TASK_MANAGER is set to 'PSQ', then the GCE Config variables are
68 # required), but these requirements are not enforced.
69 OPTIONAL_VARS = [
70 # GCE CONFIG
71 'TURBINIA_PROJECT',
72 'TURBINIA_ZONE',
73 'TURBINIA_REGION',
74 'BUCKET_NAME',
75 'PSQ_TOPIC',
76 'PUBSUB_TOPIC',
77 'GCS_OUTPUT_PATH',
78 'RECIPE_FILE_DIR',
79 'STACKDRIVER_LOGGING',
80 'STACKDRIVER_TRACEBACK',
81 # REDIS CONFIG
82 'REDIS_HOST',
83 'REDIS_PORT',
84 'REDIS_DB',
85 # Celery config
86 'CELERY_BROKER',
87 'CELERY_BACKEND',
88 'KOMBU_BROKER',
89 'KOMBU_CHANNEL',
90 'KOMBU_DURABLE',
91 # Email config
92 'EMAIL_NOTIFICATIONS',
93 'EMAIL_HOST_ADDRESS',
94 'EMAIL_PORT',
95 'EMAIL_ADDRESS',
96 'EMAIL_PASSWORD',
97 # Prometheus config
98 'PROMETHEUS_ENABLED',
99 'PROMETHEUS_ADDR',
100 'PROMETHEUS_PORT',
101 # dfDewey config
102 'DFDEWEY_PG_HOST',
103 'DFDEWEY_PG_PORT',
104 'DFDEWEY_PG_DB_NAME',
105 'DFDEWEY_OS_HOST',
106 'DFDEWEY_OS_PORT',
107 'DFDEWEY_OS_URL',
108 # General config
109 'TURBINIA_COMMAND'
110 ]
111
112 # Environment variable to look for path data in
113 ENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'
114
115 CONFIG = None
116
117 log = logging.getLogger('turbinia')
118
119
120 def LoadConfig(config_file=None):
121 """Finds Turbinia config file and loads it.
122
123 Args:
124 config_file(str): full path to config file
125 """
126 # TODO(aarontp): Find way to not require global var here. Maybe a singleton
127 # pattern on the config class.
128 # pylint: disable=global-statement
129 global CONFIG
130 if CONFIG and not config_file:
131 log.debug(
132 'Returning cached config from {0:s} instead of reloading config'.format(
133 CONFIG.configSource))
134 return CONFIG
135
136 if not config_file:
137 log.debug('No config specified. Looking in default locations for config.')
138 # If the environment variable is set, take precedence over the pre-defined
139 # CONFIGPATHs.
140 configpath = CONFIGPATH
141 if ENVCONFIGVAR in os.environ:
142 configpath = os.environ[ENVCONFIGVAR].split(':')
143
144 # Load first file found
145 for _dir, _file in itertools.product(configpath, CONFIGFILES):
146 if os.path.exists(os.path.join(_dir, _file)):
147 config_file = os.path.join(_dir, _file)
148 break
149
150 if config_file is None:
151 raise TurbiniaException('No config files found')
152
153 log.debug('Loading config from {0:s}'.format(config_file))
154 # Warn about using fallback source config, but it's currently necessary for
155 # tests. See issue #446.
156 if 'turbinia_config_tmpl' in config_file:
157 log.warning('Using fallback source config. {0:s}'.format(CONFIG_MSG))
158 try:
159 _config = imp.load_source('config', config_file)
160 except IOError as exception:
161 message = (
162 'Could not load config file {0:s}: {1!s}'.format(
163 config_file, exception))
164 log.error(message)
165 raise TurbiniaException(message)
166
167 _config.configSource = config_file
168 ValidateAndSetConfig(_config)
169
170 # Set the environment var for this so that we don't see the "No project ID
171 # could be determined." warning later.
172 if hasattr(_config, 'TURBINIA_PROJECT') and _config.TURBINIA_PROJECT:
173 os.environ['GOOGLE_CLOUD_PROJECT'] = _config.TURBINIA_PROJECT
174
175 CONFIG = _config
176 log.debug(
177 'Returning parsed config loaded from {0:s}'.format(CONFIG.configSource))
178 return _config
179
180
181 def ValidateAndSetConfig(_config):
182 """Makes sure that the config has the vars loaded and set in the module."""
183 # Explicitly set the config path
184 setattr(sys.modules[__name__], 'configSource', _config.configSource)
185
186 CONFIGVARS = REQUIRED_VARS + OPTIONAL_VARS
187 for var in CONFIGVARS:
188 empty_value = False
189 if not hasattr(_config, var):
190 if var in OPTIONAL_VARS:
191 log.debug(
192 'Setting non-existent but optional config variable {0:s} to '
193 'None'.format(var))
194 empty_value = True
195 else:
196 raise TurbiniaException(
197 'Required config attribute {0:s}:{1:s} not in config'.format(
198 _config.configSource, var))
199 if var in REQUIRED_VARS and getattr(_config, var) is None:
200 raise TurbiniaException(
201 'Config attribute {0:s}:{1:s} is not set'.format(
202 _config.configSource, var))
203
204 # Set the attribute in the current module
205 if empty_value:
206 setattr(sys.modules[__name__], var, None)
207 else:
208 setattr(sys.modules[__name__], var, getattr(_config, var))
209
210
211 def ParseDependencies():
212 """Parses the config file DEPENDENCIES variable.
213
214 Raises:
215 TurbiniaException: If bad config file.
216
217 Returns:
218 dependencies(dict): The parsed dependency values.
219 """
220 dependencies = {}
221 try:
222 for values in CONFIG.DEPENDENCIES:
223 job = values['job'].lower()
224 dependencies[job] = {}
225 dependencies[job]['programs'] = values['programs']
226 dependencies[job]['docker_image'] = values.get('docker_image')
227 dependencies[job]['timeout'] = values.get('timeout')
228 except (KeyError, TypeError) as exception:
229 raise TurbiniaException(
230 'An issue has occurred while parsing the '
231 'dependency config: {0!s}'.format(exception))
232 return dependencies
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py
--- a/turbinia/config/__init__.py
+++ b/turbinia/config/__init__.py
@@ -16,12 +16,12 @@
from __future__ import unicode_literals
-import imp
+import importlib.util
+import importlib.machinery
import itertools
import logging
import os
import sys
-
from turbinia import TurbiniaException
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
@@ -156,7 +156,11 @@
if 'turbinia_config_tmpl' in config_file:
log.warning('Using fallback source config. {0:s}'.format(CONFIG_MSG))
try:
- _config = imp.load_source('config', config_file)
+ config_loader = importlib.machinery.SourceFileLoader('config', config_file)
+ config_spec = importlib.util.spec_from_loader(
+ config_loader.name, config_loader)
+ _config = importlib.util.module_from_spec(config_spec)
+ config_loader.exec_module(_config)
except IOError as exception:
message = (
'Could not load config file {0:s}: {1!s}'.format(
| {"golden_diff": "diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py\n--- a/turbinia/config/__init__.py\n+++ b/turbinia/config/__init__.py\n@@ -16,12 +16,12 @@\n \n from __future__ import unicode_literals\n \n-import imp\n+import importlib.util\n+import importlib.machinery\n import itertools\n import logging\n import os\n import sys\n-\n from turbinia import TurbiniaException\n \n DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\n@@ -156,7 +156,11 @@\n if 'turbinia_config_tmpl' in config_file:\n log.warning('Using fallback source config. {0:s}'.format(CONFIG_MSG))\n try:\n- _config = imp.load_source('config', config_file)\n+ config_loader = importlib.machinery.SourceFileLoader('config', config_file)\n+ config_spec = importlib.util.spec_from_loader(\n+ config_loader.name, config_loader)\n+ _config = importlib.util.module_from_spec(config_spec)\n+ config_loader.exec_module(_config)\n except IOError as exception:\n message = (\n 'Could not load config file {0:s}: {1!s}'.format(\n", "issue": "Config module 'imp' deprecation warning\nCurrently, the config module uses a deprecated library method to load the config file into a module which causes a DeprecationWarning\r\n\r\n```============================================= warnings summary =============================================\r\nturbinia/config/__init__.py:19\r\n /workspaces/turbinia/turbinia/config/__init__.py:19: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses\r\n import imp\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nfrom turbinia import TurbiniaException\n\nDATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config_tmpl.py']\n# Look in homedir first, then /etc/turbinia\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__)),\n]\n# Config setup reminder for cleaner error handling on empty configs.\nCONFIG_MSG = (\n 'Copy turbinia/config/turbinia_config_tmpl.py to ~/.turbiniarc '\n 'or /etc/turbinia/turbinia.conf, edit, and re-run.')\n\n# Required config vars\nREQUIRED_VARS = [\n # Turbinia Config\n 'INSTANCE_ID',\n 'STATE_MANAGER',\n 'TASK_MANAGER',\n 'LOG_DIR',\n 'LOCK_FILE',\n 'TMP_RESOURCE_DIR',\n 'RESOURCE_FILE',\n 'RESOURCE_FILE_LOCK',\n 'SCALEDOWN_WORKER_FILE',\n 'OUTPUT_DIR',\n 'TMP_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n 'DEBUG_TASKS',\n 'DEPENDENCIES',\n 'DOCKER_ENABLED',\n 'DISABLED_JOBS',\n]\n\n# Optional config vars. Some may be mandatory depending on the configuration\n# (e.g. 
if TASK_MANAGER is set to 'PSQ', then the GCE Config variables are\n# required), but these requirements are not enforced.\nOPTIONAL_VARS = [\n # GCE CONFIG\n 'TURBINIA_PROJECT',\n 'TURBINIA_ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'RECIPE_FILE_DIR',\n 'STACKDRIVER_LOGGING',\n 'STACKDRIVER_TRACEBACK',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',\n # Email config\n 'EMAIL_NOTIFICATIONS',\n 'EMAIL_HOST_ADDRESS',\n 'EMAIL_PORT',\n 'EMAIL_ADDRESS',\n 'EMAIL_PASSWORD',\n # Prometheus config\n 'PROMETHEUS_ENABLED',\n 'PROMETHEUS_ADDR',\n 'PROMETHEUS_PORT',\n # dfDewey config\n 'DFDEWEY_PG_HOST',\n 'DFDEWEY_PG_PORT',\n 'DFDEWEY_PG_DB_NAME',\n 'DFDEWEY_OS_HOST',\n 'DFDEWEY_OS_PORT',\n 'DFDEWEY_OS_URL',\n # General config\n 'TURBINIA_COMMAND'\n]\n\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\nlog = logging.getLogger('turbinia')\n\n\ndef LoadConfig(config_file=None):\n \"\"\"Finds Turbinia config file and loads it.\n\n Args:\n config_file(str): full path to config file\n \"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG and not config_file:\n log.debug(\n 'Returning cached config from {0:s} instead of reloading config'.format(\n CONFIG.configSource))\n return CONFIG\n\n if not config_file:\n log.debug('No config specified. Looking in default locations for config.')\n # If the environment variable is set, take precedence over the pre-defined\n # CONFIGPATHs.\n configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n configpath = os.environ[ENVCONFIGVAR].split(':')\n\n # Load first file found\n for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaException('No config files found')\n\n log.debug('Loading config from {0:s}'.format(config_file))\n # Warn about using fallback source config, but it's currently necessary for\n # tests. See issue #446.\n if 'turbinia_config_tmpl' in config_file:\n log.warning('Using fallback source config. 
{0:s}'.format(CONFIG_MSG))\n try:\n _config = imp.load_source('config', config_file)\n except IOError as exception:\n message = (\n 'Could not load config file {0:s}: {1!s}'.format(\n config_file, exception))\n log.error(message)\n raise TurbiniaException(message)\n\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n\n # Set the environment var for this so that we don't see the \"No project ID\n # could be determined.\" warning later.\n if hasattr(_config, 'TURBINIA_PROJECT') and _config.TURBINIA_PROJECT:\n os.environ['GOOGLE_CLOUD_PROJECT'] = _config.TURBINIA_PROJECT\n\n CONFIG = _config\n log.debug(\n 'Returning parsed config loaded from {0:s}'.format(CONFIG.configSource))\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # Explicitly set the config path\n setattr(sys.modules[__name__], 'configSource', _config.configSource)\n\n CONFIGVARS = REQUIRED_VARS + OPTIONAL_VARS\n for var in CONFIGVARS:\n empty_value = False\n if not hasattr(_config, var):\n if var in OPTIONAL_VARS:\n log.debug(\n 'Setting non-existent but optional config variable {0:s} to '\n 'None'.format(var))\n empty_value = True\n else:\n raise TurbiniaException(\n 'Required config attribute {0:s}:{1:s} not in config'.format(\n _config.configSource, var))\n if var in REQUIRED_VARS and getattr(_config, var) is None:\n raise TurbiniaException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n if empty_value:\n setattr(sys.modules[__name__], var, None)\n else:\n setattr(sys.modules[__name__], var, getattr(_config, var))\n\n\ndef ParseDependencies():\n \"\"\"Parses the config file DEPENDENCIES variable.\n\n Raises:\n TurbiniaException: If bad config file.\n\n Returns:\n dependencies(dict): The parsed dependency values.\n \"\"\"\n dependencies = {}\n try:\n for values in CONFIG.DEPENDENCIES:\n job = values['job'].lower()\n dependencies[job] = {}\n dependencies[job]['programs'] = values['programs']\n dependencies[job]['docker_image'] = values.get('docker_image')\n dependencies[job]['timeout'] = values.get('timeout')\n except (KeyError, TypeError) as exception:\n raise TurbiniaException(\n 'An issue has occurred while parsing the '\n 'dependency config: {0!s}'.format(exception))\n return dependencies\n", "path": "turbinia/config/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport importlib.util\nimport importlib.machinery\nimport itertools\nimport logging\nimport os\nimport sys\nfrom turbinia import TurbiniaException\n\nDATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config_tmpl.py']\n# Look in homedir first, then /etc/turbinia\nCONFIGPATH = [\n 
os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__)),\n]\n# Config setup reminder for cleaner error handling on empty configs.\nCONFIG_MSG = (\n 'Copy turbinia/config/turbinia_config_tmpl.py to ~/.turbiniarc '\n 'or /etc/turbinia/turbinia.conf, edit, and re-run.')\n\n# Required config vars\nREQUIRED_VARS = [\n # Turbinia Config\n 'INSTANCE_ID',\n 'STATE_MANAGER',\n 'TASK_MANAGER',\n 'LOG_DIR',\n 'LOCK_FILE',\n 'TMP_RESOURCE_DIR',\n 'RESOURCE_FILE',\n 'RESOURCE_FILE_LOCK',\n 'SCALEDOWN_WORKER_FILE',\n 'OUTPUT_DIR',\n 'TMP_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n 'DEBUG_TASKS',\n 'DEPENDENCIES',\n 'DOCKER_ENABLED',\n 'DISABLED_JOBS',\n]\n\n# Optional config vars. Some may be mandatory depending on the configuration\n# (e.g. if TASK_MANAGER is set to 'PSQ', then the GCE Config variables are\n# required), but these requirements are not enforced.\nOPTIONAL_VARS = [\n # GCE CONFIG\n 'TURBINIA_PROJECT',\n 'TURBINIA_ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'RECIPE_FILE_DIR',\n 'STACKDRIVER_LOGGING',\n 'STACKDRIVER_TRACEBACK',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',\n # Email config\n 'EMAIL_NOTIFICATIONS',\n 'EMAIL_HOST_ADDRESS',\n 'EMAIL_PORT',\n 'EMAIL_ADDRESS',\n 'EMAIL_PASSWORD',\n # Prometheus config\n 'PROMETHEUS_ENABLED',\n 'PROMETHEUS_ADDR',\n 'PROMETHEUS_PORT',\n # dfDewey config\n 'DFDEWEY_PG_HOST',\n 'DFDEWEY_PG_PORT',\n 'DFDEWEY_PG_DB_NAME',\n 'DFDEWEY_OS_HOST',\n 'DFDEWEY_OS_PORT',\n 'DFDEWEY_OS_URL',\n # General config\n 'TURBINIA_COMMAND'\n]\n\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\nlog = logging.getLogger('turbinia')\n\n\ndef LoadConfig(config_file=None):\n \"\"\"Finds Turbinia config file and loads it.\n\n Args:\n config_file(str): full path to config file\n \"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG and not config_file:\n log.debug(\n 'Returning cached config from {0:s} instead of reloading config'.format(\n CONFIG.configSource))\n return CONFIG\n\n if not config_file:\n log.debug('No config specified. Looking in default locations for config.')\n # If the environment variable is set, take precedence over the pre-defined\n # CONFIGPATHs.\n configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n configpath = os.environ[ENVCONFIGVAR].split(':')\n\n # Load first file found\n for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaException('No config files found')\n\n log.debug('Loading config from {0:s}'.format(config_file))\n # Warn about using fallback source config, but it's currently necessary for\n # tests. See issue #446.\n if 'turbinia_config_tmpl' in config_file:\n log.warning('Using fallback source config. 
{0:s}'.format(CONFIG_MSG))\n try:\n config_loader = importlib.machinery.SourceFileLoader('config', config_file)\n config_spec = importlib.util.spec_from_loader(\n config_loader.name, config_loader)\n _config = importlib.util.module_from_spec(config_spec)\n config_loader.exec_module(_config)\n except IOError as exception:\n message = (\n 'Could not load config file {0:s}: {1!s}'.format(\n config_file, exception))\n log.error(message)\n raise TurbiniaException(message)\n\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n\n # Set the environment var for this so that we don't see the \"No project ID\n # could be determined.\" warning later.\n if hasattr(_config, 'TURBINIA_PROJECT') and _config.TURBINIA_PROJECT:\n os.environ['GOOGLE_CLOUD_PROJECT'] = _config.TURBINIA_PROJECT\n\n CONFIG = _config\n log.debug(\n 'Returning parsed config loaded from {0:s}'.format(CONFIG.configSource))\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # Explicitly set the config path\n setattr(sys.modules[__name__], 'configSource', _config.configSource)\n\n CONFIGVARS = REQUIRED_VARS + OPTIONAL_VARS\n for var in CONFIGVARS:\n empty_value = False\n if not hasattr(_config, var):\n if var in OPTIONAL_VARS:\n log.debug(\n 'Setting non-existent but optional config variable {0:s} to '\n 'None'.format(var))\n empty_value = True\n else:\n raise TurbiniaException(\n 'Required config attribute {0:s}:{1:s} not in config'.format(\n _config.configSource, var))\n if var in REQUIRED_VARS and getattr(_config, var) is None:\n raise TurbiniaException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n if empty_value:\n setattr(sys.modules[__name__], var, None)\n else:\n setattr(sys.modules[__name__], var, getattr(_config, var))\n\n\ndef ParseDependencies():\n \"\"\"Parses the config file DEPENDENCIES variable.\n\n Raises:\n TurbiniaException: If bad config file.\n\n Returns:\n dependencies(dict): The parsed dependency values.\n \"\"\"\n dependencies = {}\n try:\n for values in CONFIG.DEPENDENCIES:\n job = values['job'].lower()\n dependencies[job] = {}\n dependencies[job]['programs'] = values['programs']\n dependencies[job]['docker_image'] = values.get('docker_image')\n dependencies[job]['timeout'] = values.get('timeout')\n except (KeyError, TypeError) as exception:\n raise TurbiniaException(\n 'An issue has occurred while parsing the '\n 'dependency config: {0!s}'.format(exception))\n return dependencies\n", "path": "turbinia/config/__init__.py"}]} | 2,760 | 282 |
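Note: the golden diff in the row above replaces the deprecated `imp.load_source` call with the `importlib` machinery. A minimal, self-contained sketch of that loading pattern outside of Turbinia is shown below; the helper name `load_source_module` and the example path are illustrative assumptions, not part of the project's API.

```python
# Sketch only: load a Python source file from an arbitrary path as a module,
# mirroring the importlib-based pattern used in the patch above.
import importlib.machinery
import importlib.util


def load_source_module(name, path):
    # SourceFileLoader reads the file at `path`; the spec_from_loader /
    # module_from_spec / exec_module sequence is the documented importlib
    # replacement for imp.load_source.
    loader = importlib.machinery.SourceFileLoader(name, path)
    spec = importlib.util.spec_from_loader(loader.name, loader)
    module = importlib.util.module_from_spec(spec)
    loader.exec_module(module)
    return module


# Example use (hypothetical path):
# config = load_source_module("config", "/etc/turbinia/turbinia.conf")
```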
gh_patches_debug_28789 | rasdani/github-patches | git_diff | pypa__virtualenv-2206 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect (broken) virtualenv layout with pypy3.8's new layout
**Issue**
PyPy3.8 (currently 7.3.6rc1) supports a new install layout that resembles CPython more. That is, `sys.prefix` no longer needs being isolated, and site-packages are found in `$prefix/lib/pypy3.8/site-packages`. However, virtualenv tries to symlink everything from `/usr/lib` including the `pypy3.8` directory. As a result, the user can't write to the site-packages directory in the venv.
I haven't tried running it as root but I can imagine it doing major mess if it virtualenv doesn't take any precautions from writing into system directories.
**Environment**
Provide at least:
- OS: Gentoo Linux
- ``pip list`` of the host python where ``virtualenv`` is installed: [pip-list.txt](https://github.com/pypa/virtualenv/files/7167321/pip-list.txt)
**Output of the virtual environment creation**
Make sure to run the creation with `-vvv --with-traceback`:
Full output: [output.txt](https://github.com/pypa/virtualenv/files/7167331/output.txt)
tail:
```
1048 create virtualenv import hook file /tmp/z/lib/pypy3.8/site-packages/_virtualenv.pth [DEBUG api:95]
Traceback (most recent call last):
File "/usr/lib/pypy3.8/runpy.py", line 198, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/pypy3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/lib/pypy3.8/site-packages/virtualenv/__main__.py", line 80, in <module>
run_with_catch() # pragma: no cov
File "/usr/lib/pypy3.8/site-packages/virtualenv/__main__.py", line 65, in run_with_catch
run(args, options, env)
File "/usr/lib/pypy3.8/site-packages/virtualenv/__main__.py", line 18, in run
session = cli_run(args, options, env)
File "/usr/lib/pypy3.8/site-packages/virtualenv/run/__init__.py", line 32, in cli_run
of_session.run()
File "/usr/lib/pypy3.8/site-packages/virtualenv/run/session.py", line 46, in run
self._create()
File "/usr/lib/pypy3.8/site-packages/virtualenv/run/session.py", line 53, in _create
self.creator.run()
File "/usr/lib/pypy3.8/site-packages/virtualenv/create/creator.py", line 171, in run
self.create()
File "/usr/lib/pypy3.8/site-packages/virtualenv/create/via_global_ref/builtin/via_global_self_do.py", line 101, in create
super(ViaGlobalRefVirtualenvBuiltin, self).create()
File "/usr/lib/pypy3.8/site-packages/virtualenv/create/via_global_ref/api.py", line 89, in create
self.install_patch()
File "/usr/lib/pypy3.8/site-packages/virtualenv/create/via_global_ref/api.py", line 96, in install_patch
pth.write_text("import _virtualenv")
File "/usr/lib/pypy3.8/pathlib.py", line 1255, in write_text
with self.open(mode='w', encoding=encoding, errors=errors) as f:
File "/usr/lib/pypy3.8/pathlib.py", line 1223, in open
opener=self._opener)
File "/usr/lib/pypy3.8/pathlib.py", line 1078, in _opener
return self._accessor.open(self, flags, mode)
PermissionError: [Errno 13] Permission denied: PosixPath('/tmp/z/lib/pypy3.8/site-packages/_virtualenv.pth')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import abc
4
5 from six import add_metaclass
6
7 from virtualenv.create.describe import PosixSupports, Python3Supports, WindowsSupports
8 from virtualenv.create.via_global_ref.builtin.ref import PathRefToDest
9 from virtualenv.util.path import Path
10
11 from .common import PyPy
12
13
14 @add_metaclass(abc.ABCMeta)
15 class PyPy3(PyPy, Python3Supports):
16 @classmethod
17 def exe_stem(cls):
18 return "pypy3"
19
20 @classmethod
21 def exe_names(cls, interpreter):
22 return super(PyPy3, cls).exe_names(interpreter) | {"pypy"}
23
24
25 class PyPy3Posix(PyPy3, PosixSupports):
26 """PyPy 2 on POSIX"""
27
28 @property
29 def stdlib(self):
30 """PyPy3 respects sysconfig only for the host python, virtual envs is instead lib/pythonx.y/site-packages"""
31 return self.dest / "lib" / "python{}".format(self.interpreter.version_release_str) / "site-packages"
32
33 @classmethod
34 def _shared_libs(cls):
35 return ["libpypy3-c.so", "libpypy3-c.dylib"]
36
37 def to_lib(self, src):
38 return self.dest / "lib" / src.name
39
40 @classmethod
41 def sources(cls, interpreter):
42 for src in super(PyPy3Posix, cls).sources(interpreter):
43 yield src
44 host_lib = Path(interpreter.system_prefix) / "lib"
45 if host_lib.exists() and host_lib.is_dir():
46 for path in host_lib.iterdir():
47 yield PathRefToDest(path, dest=cls.to_lib)
48
49
50 class Pypy3Windows(PyPy3, WindowsSupports):
51 """PyPy 2 on Windows"""
52
53 @property
54 def stdlib(self):
55 """PyPy3 respects sysconfig only for the host python, virtual envs is instead Lib/site-packages"""
56 return self.dest / "Lib" / "site-packages"
57
58 @property
59 def bin_dir(self):
60 """PyPy3 needs to fallback to pypy definition"""
61 return self.dest / "Scripts"
62
63 @classmethod
64 def _shared_libs(cls):
65 return ["libpypy3-c.dll", "libffi-7.dll"]
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py b/src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py
--- a/src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py
+++ b/src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py
@@ -28,7 +28,7 @@
@property
def stdlib(self):
"""PyPy3 respects sysconfig only for the host python, virtual envs is instead lib/pythonx.y/site-packages"""
- return self.dest / "lib" / "python{}".format(self.interpreter.version_release_str) / "site-packages"
+ return self.dest / "lib" / "pypy{}".format(self.interpreter.version_release_str) / "site-packages"
@classmethod
def _shared_libs(cls):
@@ -41,9 +41,19 @@
def sources(cls, interpreter):
for src in super(PyPy3Posix, cls).sources(interpreter):
yield src
+ # Also copy/symlink anything under prefix/lib, which, for "portable"
+ # PyPy builds, includes the tk,tcl runtime and a number of shared
+ # objects. In distro-specific builds or on conda this should be empty
+ # (on PyPy3.8+ it will, like on CPython, hold the stdlib).
host_lib = Path(interpreter.system_prefix) / "lib"
+ stdlib = Path(interpreter.system_stdlib)
if host_lib.exists() and host_lib.is_dir():
for path in host_lib.iterdir():
+ if stdlib == path:
+ # For PyPy3.8+ the stdlib lives in lib/pypy3.8
+ # We need to avoid creating a symlink to it since that
+ # will defeat the purpose of a virtualenv
+ continue
yield PathRefToDest(path, dest=cls.to_lib)
| {"golden_diff": "diff --git a/src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py b/src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py\n--- a/src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py\n+++ b/src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py\n@@ -28,7 +28,7 @@\n @property\n def stdlib(self):\n \"\"\"PyPy3 respects sysconfig only for the host python, virtual envs is instead lib/pythonx.y/site-packages\"\"\"\n- return self.dest / \"lib\" / \"python{}\".format(self.interpreter.version_release_str) / \"site-packages\"\n+ return self.dest / \"lib\" / \"pypy{}\".format(self.interpreter.version_release_str) / \"site-packages\"\n \n @classmethod\n def _shared_libs(cls):\n@@ -41,9 +41,19 @@\n def sources(cls, interpreter):\n for src in super(PyPy3Posix, cls).sources(interpreter):\n yield src\n+ # Also copy/symlink anything under prefix/lib, which, for \"portable\"\n+ # PyPy builds, includes the tk,tcl runtime and a number of shared\n+ # objects. In distro-specific builds or on conda this should be empty\n+ # (on PyPy3.8+ it will, like on CPython, hold the stdlib).\n host_lib = Path(interpreter.system_prefix) / \"lib\"\n+ stdlib = Path(interpreter.system_stdlib)\n if host_lib.exists() and host_lib.is_dir():\n for path in host_lib.iterdir():\n+ if stdlib == path:\n+ # For PyPy3.8+ the stdlib lives in lib/pypy3.8\n+ # We need to avoid creating a symlink to it since that\n+ # will defeat the purpose of a virtualenv\n+ continue\n yield PathRefToDest(path, dest=cls.to_lib)\n", "issue": "Incorrect (broken) virtualenv layout with pypy3.8's new layout\n**Issue**\r\n\r\nPyPy3.8 (currently 7.3.6rc1) supports a new install layout that resembles CPython more. That is, `sys.prefix` no longer needs being isolated, and site-packages are found in `$prefix/lib/pypy3.8/site-packages`. However, virtualenv tries to symlink everything from `/usr/lib` including the `pypy3.8` directory. 
As a result, the user can't write to the site-packages directory in the venv.\r\n\r\nI haven't tried running it as root but I can imagine it doing major mess if it virtualenv doesn't take any precautions from writing into system directories.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: Gentoo Linux\r\n- ``pip list`` of the host python where ``virtualenv`` is installed: [pip-list.txt](https://github.com/pypa/virtualenv/files/7167321/pip-list.txt)\r\n\r\n\r\n**Output of the virtual environment creation**\r\n\r\nMake sure to run the creation with `-vvv --with-traceback`:\r\n\r\nFull output: [output.txt](https://github.com/pypa/virtualenv/files/7167331/output.txt)\r\n\r\ntail:\r\n```\r\n1048 create virtualenv import hook file /tmp/z/lib/pypy3.8/site-packages/_virtualenv.pth [DEBUG api:95]\r\nTraceback (most recent call last):\r\n File \"/usr/lib/pypy3.8/runpy.py\", line 198, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/pypy3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/__main__.py\", line 80, in <module>\r\n run_with_catch() # pragma: no cov\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/__main__.py\", line 65, in run_with_catch\r\n run(args, options, env)\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/__main__.py\", line 18, in run\r\n session = cli_run(args, options, env)\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/run/__init__.py\", line 32, in cli_run\r\n of_session.run()\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/run/session.py\", line 46, in run\r\n self._create()\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/run/session.py\", line 53, in _create\r\n self.creator.run()\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/create/creator.py\", line 171, in run\r\n self.create()\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/create/via_global_ref/builtin/via_global_self_do.py\", line 101, in create\r\n super(ViaGlobalRefVirtualenvBuiltin, self).create()\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/create/via_global_ref/api.py\", line 89, in create\r\n self.install_patch()\r\n File \"/usr/lib/pypy3.8/site-packages/virtualenv/create/via_global_ref/api.py\", line 96, in install_patch\r\n pth.write_text(\"import _virtualenv\")\r\n File \"/usr/lib/pypy3.8/pathlib.py\", line 1255, in write_text\r\n with self.open(mode='w', encoding=encoding, errors=errors) as f:\r\n File \"/usr/lib/pypy3.8/pathlib.py\", line 1223, in open\r\n opener=self._opener)\r\n File \"/usr/lib/pypy3.8/pathlib.py\", line 1078, in _opener\r\n return self._accessor.open(self, flags, mode)\r\nPermissionError: [Errno 13] Permission denied: PosixPath('/tmp/z/lib/pypy3.8/site-packages/_virtualenv.pth')\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport abc\n\nfrom six import add_metaclass\n\nfrom virtualenv.create.describe import PosixSupports, Python3Supports, WindowsSupports\nfrom virtualenv.create.via_global_ref.builtin.ref import PathRefToDest\nfrom virtualenv.util.path import Path\n\nfrom .common import PyPy\n\n\n@add_metaclass(abc.ABCMeta)\nclass PyPy3(PyPy, Python3Supports):\n @classmethod\n def exe_stem(cls):\n return \"pypy3\"\n\n @classmethod\n def exe_names(cls, interpreter):\n return super(PyPy3, cls).exe_names(interpreter) | {\"pypy\"}\n\n\nclass PyPy3Posix(PyPy3, PosixSupports):\n \"\"\"PyPy 2 on POSIX\"\"\"\n\n @property\n def stdlib(self):\n \"\"\"PyPy3 respects sysconfig only for the host python, 
virtual envs is instead lib/pythonx.y/site-packages\"\"\"\n return self.dest / \"lib\" / \"python{}\".format(self.interpreter.version_release_str) / \"site-packages\"\n\n @classmethod\n def _shared_libs(cls):\n return [\"libpypy3-c.so\", \"libpypy3-c.dylib\"]\n\n def to_lib(self, src):\n return self.dest / \"lib\" / src.name\n\n @classmethod\n def sources(cls, interpreter):\n for src in super(PyPy3Posix, cls).sources(interpreter):\n yield src\n host_lib = Path(interpreter.system_prefix) / \"lib\"\n if host_lib.exists() and host_lib.is_dir():\n for path in host_lib.iterdir():\n yield PathRefToDest(path, dest=cls.to_lib)\n\n\nclass Pypy3Windows(PyPy3, WindowsSupports):\n \"\"\"PyPy 2 on Windows\"\"\"\n\n @property\n def stdlib(self):\n \"\"\"PyPy3 respects sysconfig only for the host python, virtual envs is instead Lib/site-packages\"\"\"\n return self.dest / \"Lib\" / \"site-packages\"\n\n @property\n def bin_dir(self):\n \"\"\"PyPy3 needs to fallback to pypy definition\"\"\"\n return self.dest / \"Scripts\"\n\n @classmethod\n def _shared_libs(cls):\n return [\"libpypy3-c.dll\", \"libffi-7.dll\"]\n", "path": "src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport abc\n\nfrom six import add_metaclass\n\nfrom virtualenv.create.describe import PosixSupports, Python3Supports, WindowsSupports\nfrom virtualenv.create.via_global_ref.builtin.ref import PathRefToDest\nfrom virtualenv.util.path import Path\n\nfrom .common import PyPy\n\n\n@add_metaclass(abc.ABCMeta)\nclass PyPy3(PyPy, Python3Supports):\n @classmethod\n def exe_stem(cls):\n return \"pypy3\"\n\n @classmethod\n def exe_names(cls, interpreter):\n return super(PyPy3, cls).exe_names(interpreter) | {\"pypy\"}\n\n\nclass PyPy3Posix(PyPy3, PosixSupports):\n \"\"\"PyPy 2 on POSIX\"\"\"\n\n @property\n def stdlib(self):\n \"\"\"PyPy3 respects sysconfig only for the host python, virtual envs is instead lib/pythonx.y/site-packages\"\"\"\n return self.dest / \"lib\" / \"pypy{}\".format(self.interpreter.version_release_str) / \"site-packages\"\n\n @classmethod\n def _shared_libs(cls):\n return [\"libpypy3-c.so\", \"libpypy3-c.dylib\"]\n\n def to_lib(self, src):\n return self.dest / \"lib\" / src.name\n\n @classmethod\n def sources(cls, interpreter):\n for src in super(PyPy3Posix, cls).sources(interpreter):\n yield src\n # Also copy/symlink anything under prefix/lib, which, for \"portable\"\n # PyPy builds, includes the tk,tcl runtime and a number of shared\n # objects. 
In distro-specific builds or on conda this should be empty\n # (on PyPy3.8+ it will, like on CPython, hold the stdlib).\n host_lib = Path(interpreter.system_prefix) / \"lib\"\n stdlib = Path(interpreter.system_stdlib)\n if host_lib.exists() and host_lib.is_dir():\n for path in host_lib.iterdir():\n if stdlib == path:\n # For PyPy3.8+ the stdlib lives in lib/pypy3.8\n # We need to avoid creating a symlink to it since that\n # will defeat the purpose of a virtualenv\n continue\n yield PathRefToDest(path, dest=cls.to_lib)\n\n\nclass Pypy3Windows(PyPy3, WindowsSupports):\n \"\"\"PyPy 2 on Windows\"\"\"\n\n @property\n def stdlib(self):\n \"\"\"PyPy3 respects sysconfig only for the host python, virtual envs is instead Lib/site-packages\"\"\"\n return self.dest / \"Lib\" / \"site-packages\"\n\n @property\n def bin_dir(self):\n \"\"\"PyPy3 needs to fallback to pypy definition\"\"\"\n return self.dest / \"Scripts\"\n\n @classmethod\n def _shared_libs(cls):\n return [\"libpypy3-c.dll\", \"libffi-7.dll\"]\n", "path": "src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py"}]} | 1,811 | 439 |
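Note: besides repointing `stdlib` at `lib/pypyX.Y/site-packages`, the patch above avoids symlinking the interpreter's own stdlib directory while mirroring the host `lib/`. A minimal sketch of that skip logic in isolation follows; the function name and example paths are illustrative, not virtualenv's API.

```python
# Sketch only: yield the entries of the host prefix's lib/ that a venv may
# mirror, skipping the stdlib directory (e.g. lib/pypy3.8 on PyPy 3.8+),
# since linking it would expose the read-only system site-packages.
from pathlib import Path


def host_lib_entries(system_prefix, system_stdlib):
    host_lib = Path(system_prefix) / "lib"
    stdlib = Path(system_stdlib)
    if host_lib.is_dir():
        for path in host_lib.iterdir():
            if path == stdlib:
                continue  # keep the venv's site-packages writable
            yield path


# Example use (hypothetical paths):
# for entry in host_lib_entries("/usr", "/usr/lib/pypy3.8"):
#     print(entry)
```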
gh_patches_debug_36026 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-2094 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Save phone number received from login.gov
### Issue description
For the user profile feature, we will need phone number from login.gov and need it to appear on the connected Contact object. We should save the phone number both the user and connected contact as soon as we get it, but only if the phone number isn't already filled in. Users can update the phone number as needed in our system and we should trust if they already have a phone number filled in manually then it is probably a better one to contact them on anyways.
### Acceptance criteria
- [ ] Save phone number from login.gov to a user's Contact
- [ ] If phone number is already filled in for the associated contact, update the user and leave the contact alone
- [ ] On saving contact, if contact has a related user with no phone number, update user
- [ ] Update unit tests
### Additional context
_No response_
### Links to other issues
blocks: #1807
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/models/contact.py`
Content:
```
1 from django.db import models
2
3 from .utility.time_stamped_model import TimeStampedModel
4
5 from phonenumber_field.modelfields import PhoneNumberField # type: ignore
6
7
8 class Contact(TimeStampedModel):
9 """Contact information follows a similar pattern for each contact."""
10
11 user = models.OneToOneField(
12 "registrar.User",
13 null=True,
14 blank=True,
15 on_delete=models.SET_NULL,
16 )
17
18 first_name = models.CharField(
19 null=True,
20 blank=True,
21 verbose_name="first name",
22 db_index=True,
23 )
24 middle_name = models.CharField(
25 null=True,
26 blank=True,
27 )
28 last_name = models.CharField(
29 null=True,
30 blank=True,
31 verbose_name="last name",
32 db_index=True,
33 )
34 title = models.CharField(
35 null=True,
36 blank=True,
37 verbose_name="title / role",
38 )
39 email = models.EmailField(
40 null=True,
41 blank=True,
42 db_index=True,
43 max_length=320,
44 )
45 phone = PhoneNumberField(
46 null=True,
47 blank=True,
48 db_index=True,
49 )
50
51 def _get_all_relations(self):
52 """Returns an array of all fields which are relations"""
53 return [f.name for f in self._meta.get_fields() if f.is_relation]
54
55 def has_more_than_one_join(self, expected_relation):
56 """Helper for finding whether an object is joined more than once.
57 expected_relation is the one relation with one expected join"""
58 # all_relations is the list of all_relations (from contact) to be checked for existing joins
59 all_relations = self._get_all_relations()
60 return any(self._has_more_than_one_join_per_relation(rel, expected_relation) for rel in all_relations)
61
62 def _has_more_than_one_join_per_relation(self, relation, expected_relation):
63 """Helper for finding whether an object is joined more than once."""
64 # threshold is the number of related objects that are acceptable
65 # when determining if related objects exist. threshold is 0 for most
66 # relationships. if the relationship is expected_relation, we know that
67 # there is already exactly 1 acceptable relationship (the one we are
68 # attempting to delete), so the threshold is 1
69 threshold = 1 if relation == expected_relation else 0
70
71 # Raise a KeyError if rel is not a defined field on the db_obj model
72 # This will help catch any errors in relation passed.
73 if relation not in [field.name for field in self._meta.get_fields()]:
74 raise KeyError(f"{relation} is not a defined field on the {self._meta.model_name} model.")
75
76 # if attr rel in db_obj is not None, then test if reference object(s) exist
77 if getattr(self, relation) is not None:
78 field = self._meta.get_field(relation)
79 if isinstance(field, models.OneToOneField):
80 # if the rel field is a OneToOne field, then we have already
81 # determined that the object exists (is not None)
82 # so return True unless the relation being tested is the expected_relation
83 is_not_expected_relation = relation != expected_relation
84 return is_not_expected_relation
85 elif isinstance(field, models.ForeignObjectRel):
86 # if the rel field is a ManyToOne or ManyToMany, then we need
87 # to determine if the count of related objects is greater than
88 # the threshold
89 return getattr(self, relation).count() > threshold
90 return False
91
92 def get_formatted_name(self):
93 """Returns the contact's name in Western order."""
94 names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]
95 return " ".join(names) if names else "Unknown"
96
97 def has_contact_info(self):
98 return bool(self.title or self.email or self.phone)
99
100 def save(self, *args, **kwargs):
101 # Call the parent class's save method to perform the actual save
102 super().save(*args, **kwargs)
103
104 # Update the related User object's first_name and last_name
105 if self.user and (not self.user.first_name or not self.user.last_name):
106 self.user.first_name = self.first_name
107 self.user.last_name = self.last_name
108 self.user.save()
109
110 def __str__(self):
111 if self.first_name or self.last_name:
112 return self.get_formatted_name()
113 elif self.email:
114 return self.email
115 elif self.pk:
116 return str(self.pk)
117 else:
118 return ""
119
```
Path: `src/djangooidc/backends.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import logging
5
6 from django.conf import settings
7 from django.contrib.auth import get_user_model
8 from django.contrib.auth.backends import ModelBackend
9 from django.utils import timezone
10
11 logger = logging.getLogger(__name__)
12
13
14 class OpenIdConnectBackend(ModelBackend):
15 """
16 This backend checks a previously performed OIDC authentication.
17 If it is OK and the user already exists in the database, it is returned.
18 If it is OK and user does not exist in the database, it is created and
19 returned unless setting OIDC_CREATE_UNKNOWN_USER is False.
20 In all other cases, None is returned.
21 """
22
23 def authenticate(self, request, **kwargs):
24 logger.debug("kwargs %s" % kwargs)
25 user = None
26 if not kwargs or "sub" not in kwargs.keys():
27 return user
28
29 UserModel = get_user_model()
30 username = self.clean_username(kwargs["sub"])
31
32 # Some OP may actually choose to withhold some information, so we must
33 # test if it is present
34 openid_data = {"last_login": timezone.now()}
35 openid_data["first_name"] = kwargs.get("given_name", "")
36 openid_data["last_name"] = kwargs.get("family_name", "")
37 openid_data["email"] = kwargs.get("email", "")
38 openid_data["phone"] = kwargs.get("phone", "")
39
40 # Note that this could be accomplished in one try-except clause, but
41 # instead we use get_or_create when creating unknown users since it has
42 # built-in safeguards for multiple threads.
43 if getattr(settings, "OIDC_CREATE_UNKNOWN_USER", True):
44 args = {
45 UserModel.USERNAME_FIELD: username,
46 # defaults _will_ be updated, these are not fallbacks
47 "defaults": openid_data,
48 }
49
50 user, created = UserModel.objects.get_or_create(**args)
51
52 if not created:
53 # If user exists, update existing user
54 self.update_existing_user(user, args["defaults"])
55 else:
56 # If user is created, configure the user
57 user = self.configure_user(user, **kwargs)
58 else:
59 try:
60 user = UserModel.objects.get_by_natural_key(username)
61 except UserModel.DoesNotExist:
62 return None
63 # run this callback for a each login
64 user.on_each_login()
65 return user
66
67 def update_existing_user(self, user, kwargs):
68 """Update other fields without overwriting first_name and last_name.
69 Overwrite first_name and last_name if not empty string"""
70
71 for key, value in kwargs.items():
72 # Check if the key is not first_name or last_name or value is not empty string
73 if key not in ["first_name", "last_name"] or value:
74 setattr(user, key, value)
75 user.save()
76
77 def clean_username(self, username):
78 """
79 Performs any cleaning on the "username" prior to using it to get or
80 create the user object. Returns the cleaned username.
81 """
82 return username
83
84 def configure_user(self, user, **kwargs):
85 """
86 Configures a user after creation and returns the updated user.
87 """
88 user.set_unusable_password()
89 return user
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/djangooidc/backends.py b/src/djangooidc/backends.py
--- a/src/djangooidc/backends.py
+++ b/src/djangooidc/backends.py
@@ -65,13 +65,31 @@
return user
def update_existing_user(self, user, kwargs):
- """Update other fields without overwriting first_name and last_name.
- Overwrite first_name and last_name if not empty string"""
+ """
+ Update user fields without overwriting certain fields.
+
+ Args:
+ user: User object to be updated.
+ kwargs: Dictionary containing fields to update and their new values.
+ Note:
+ This method updates user fields while preserving the values of 'first_name',
+ 'last_name', and 'phone' fields, unless specific conditions are met.
+
+ - 'first_name', 'last_name' or 'phone' will be updated if the provided value is not empty.
+ """
+
+ fields_to_check = ["first_name", "last_name", "phone"]
+
+ # Iterate over fields to update
for key, value in kwargs.items():
- # Check if the key is not first_name or last_name or value is not empty string
- if key not in ["first_name", "last_name"] or value:
+ # Check if the field is not 'first_name', 'last_name', or 'phone',
+ # or if it's 'first_name' or 'last_name' or 'phone' and the provided value is not empty
+ if key not in fields_to_check or (key in fields_to_check and value):
+ # Update the corresponding attribute of the user object
setattr(user, key, value)
+
+ # Save the user object with the updated fields
user.save()
def clean_username(self, username):
diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py
--- a/src/registrar/models/contact.py
+++ b/src/registrar/models/contact.py
@@ -101,11 +101,23 @@
# Call the parent class's save method to perform the actual save
super().save(*args, **kwargs)
- # Update the related User object's first_name and last_name
- if self.user and (not self.user.first_name or not self.user.last_name):
- self.user.first_name = self.first_name
- self.user.last_name = self.last_name
- self.user.save()
+ if self.user:
+ updated = False
+
+ # Update first name and last name if necessary
+ if not self.user.first_name or not self.user.last_name:
+ self.user.first_name = self.first_name
+ self.user.last_name = self.last_name
+ updated = True
+
+ # Update phone if necessary
+ if not self.user.phone:
+ self.user.phone = self.phone
+ updated = True
+
+ # Save user if any updates were made
+ if updated:
+ self.user.save()
def __str__(self):
if self.first_name or self.last_name:
| {"golden_diff": "diff --git a/src/djangooidc/backends.py b/src/djangooidc/backends.py\n--- a/src/djangooidc/backends.py\n+++ b/src/djangooidc/backends.py\n@@ -65,13 +65,31 @@\n return user\n \n def update_existing_user(self, user, kwargs):\n- \"\"\"Update other fields without overwriting first_name and last_name.\n- Overwrite first_name and last_name if not empty string\"\"\"\n+ \"\"\"\n+ Update user fields without overwriting certain fields.\n+\n+ Args:\n+ user: User object to be updated.\n+ kwargs: Dictionary containing fields to update and their new values.\n \n+ Note:\n+ This method updates user fields while preserving the values of 'first_name',\n+ 'last_name', and 'phone' fields, unless specific conditions are met.\n+\n+ - 'first_name', 'last_name' or 'phone' will be updated if the provided value is not empty.\n+ \"\"\"\n+\n+ fields_to_check = [\"first_name\", \"last_name\", \"phone\"]\n+\n+ # Iterate over fields to update\n for key, value in kwargs.items():\n- # Check if the key is not first_name or last_name or value is not empty string\n- if key not in [\"first_name\", \"last_name\"] or value:\n+ # Check if the field is not 'first_name', 'last_name', or 'phone',\n+ # or if it's 'first_name' or 'last_name' or 'phone' and the provided value is not empty\n+ if key not in fields_to_check or (key in fields_to_check and value):\n+ # Update the corresponding attribute of the user object\n setattr(user, key, value)\n+\n+ # Save the user object with the updated fields\n user.save()\n \n def clean_username(self, username):\ndiff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py\n--- a/src/registrar/models/contact.py\n+++ b/src/registrar/models/contact.py\n@@ -101,11 +101,23 @@\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n \n- # Update the related User object's first_name and last_name\n- if self.user and (not self.user.first_name or not self.user.last_name):\n- self.user.first_name = self.first_name\n- self.user.last_name = self.last_name\n- self.user.save()\n+ if self.user:\n+ updated = False\n+\n+ # Update first name and last name if necessary\n+ if not self.user.first_name or not self.user.last_name:\n+ self.user.first_name = self.first_name\n+ self.user.last_name = self.last_name\n+ updated = True\n+\n+ # Update phone if necessary\n+ if not self.user.phone:\n+ self.user.phone = self.phone\n+ updated = True\n+\n+ # Save user if any updates were made\n+ if updated:\n+ self.user.save()\n \n def __str__(self):\n if self.first_name or self.last_name:\n", "issue": "Save phone number received from login.gov\n### Issue description\n\nFor the user profile feature, we will need phone number from login.gov and need it to appear on the connected Contact object. We should save the phone number both the user and connected contact as soon as we get it, but only if the phone number isn't already filled in. Users can update the phone number as needed in our system and we should trust if they already have a phone number filled in manually then it is probably a better one to contact them on anyways. 
\n\n### Acceptance criteria\n\n- [ ] Save phone number from login.gov to a user's Contact\n- [ ] If phone number is already filled in for the associated contact, update the user and leave the contact alone\n- [ ] On saving contact, if contact has a related user with no phone number, update user\n- [ ] Update unit tests\n\n### Additional context\n\n_No response_\n\n### Links to other issues\n\nblocks: #1807 \n", "before_files": [{"content": "from django.db import models\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"first name\",\n db_index=True,\n )\n middle_name = models.CharField(\n null=True,\n blank=True,\n )\n last_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"last name\",\n db_index=True,\n )\n title = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"title / role\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n db_index=True,\n max_length=320,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n db_index=True,\n )\n\n def _get_all_relations(self):\n \"\"\"Returns an array of all fields which are relations\"\"\"\n return [f.name for f in self._meta.get_fields() if f.is_relation]\n\n def has_more_than_one_join(self, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\n expected_relation is the one relation with one expected join\"\"\"\n # all_relations is the list of all_relations (from contact) to be checked for existing joins\n all_relations = self._get_all_relations()\n return any(self._has_more_than_one_join_per_relation(rel, expected_relation) for rel in all_relations)\n\n def _has_more_than_one_join_per_relation(self, relation, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\"\"\"\n # threshold is the number of related objects that are acceptable\n # when determining if related objects exist. threshold is 0 for most\n # relationships. 
if the relationship is expected_relation, we know that\n # there is already exactly 1 acceptable relationship (the one we are\n # attempting to delete), so the threshold is 1\n threshold = 1 if relation == expected_relation else 0\n\n # Raise a KeyError if rel is not a defined field on the db_obj model\n # This will help catch any errors in relation passed.\n if relation not in [field.name for field in self._meta.get_fields()]:\n raise KeyError(f\"{relation} is not a defined field on the {self._meta.model_name} model.\")\n\n # if attr rel in db_obj is not None, then test if reference object(s) exist\n if getattr(self, relation) is not None:\n field = self._meta.get_field(relation)\n if isinstance(field, models.OneToOneField):\n # if the rel field is a OneToOne field, then we have already\n # determined that the object exists (is not None)\n # so return True unless the relation being tested is the expected_relation\n is_not_expected_relation = relation != expected_relation\n return is_not_expected_relation\n elif isinstance(field, models.ForeignObjectRel):\n # if the rel field is a ManyToOne or ManyToMany, then we need\n # to determine if the count of related objects is greater than\n # the threshold\n return getattr(self, relation).count() > threshold\n return False\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def has_contact_info(self):\n return bool(self.title or self.email or self.phone)\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user and (not self.user.first_name or not self.user.last_name):\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py"}, {"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.utils import timezone\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenIdConnectBackend(ModelBackend):\n \"\"\"\n This backend checks a previously performed OIDC authentication.\n If it is OK and the user already exists in the database, it is returned.\n If it is OK and user does not exist in the database, it is created and\n returned unless setting OIDC_CREATE_UNKNOWN_USER is False.\n In all other cases, None is returned.\n \"\"\"\n\n def authenticate(self, request, **kwargs):\n logger.debug(\"kwargs %s\" % kwargs)\n user = None\n if not kwargs or \"sub\" not in kwargs.keys():\n return user\n\n UserModel = get_user_model()\n username = self.clean_username(kwargs[\"sub\"])\n\n # Some OP may actually choose to withhold some information, so we must\n # test if it is present\n openid_data = {\"last_login\": timezone.now()}\n openid_data[\"first_name\"] = kwargs.get(\"given_name\", \"\")\n openid_data[\"last_name\"] = kwargs.get(\"family_name\", \"\")\n openid_data[\"email\"] = kwargs.get(\"email\", \"\")\n openid_data[\"phone\"] = kwargs.get(\"phone\", \"\")\n\n # Note that 
this could be accomplished in one try-except clause, but\n # instead we use get_or_create when creating unknown users since it has\n # built-in safeguards for multiple threads.\n if getattr(settings, \"OIDC_CREATE_UNKNOWN_USER\", True):\n args = {\n UserModel.USERNAME_FIELD: username,\n # defaults _will_ be updated, these are not fallbacks\n \"defaults\": openid_data,\n }\n\n user, created = UserModel.objects.get_or_create(**args)\n\n if not created:\n # If user exists, update existing user\n self.update_existing_user(user, args[\"defaults\"])\n else:\n # If user is created, configure the user\n user = self.configure_user(user, **kwargs)\n else:\n try:\n user = UserModel.objects.get_by_natural_key(username)\n except UserModel.DoesNotExist:\n return None\n # run this callback for a each login\n user.on_each_login()\n return user\n\n def update_existing_user(self, user, kwargs):\n \"\"\"Update other fields without overwriting first_name and last_name.\n Overwrite first_name and last_name if not empty string\"\"\"\n\n for key, value in kwargs.items():\n # Check if the key is not first_name or last_name or value is not empty string\n if key not in [\"first_name\", \"last_name\"] or value:\n setattr(user, key, value)\n user.save()\n\n def clean_username(self, username):\n \"\"\"\n Performs any cleaning on the \"username\" prior to using it to get or\n create the user object. Returns the cleaned username.\n \"\"\"\n return username\n\n def configure_user(self, user, **kwargs):\n \"\"\"\n Configures a user after creation and returns the updated user.\n \"\"\"\n user.set_unusable_password()\n return user\n", "path": "src/djangooidc/backends.py"}], "after_files": [{"content": "from django.db import models\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"first name\",\n db_index=True,\n )\n middle_name = models.CharField(\n null=True,\n blank=True,\n )\n last_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"last name\",\n db_index=True,\n )\n title = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"title / role\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n db_index=True,\n max_length=320,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n db_index=True,\n )\n\n def _get_all_relations(self):\n \"\"\"Returns an array of all fields which are relations\"\"\"\n return [f.name for f in self._meta.get_fields() if f.is_relation]\n\n def has_more_than_one_join(self, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\n expected_relation is the one relation with one expected join\"\"\"\n # all_relations is the list of all_relations (from contact) to be checked for existing joins\n all_relations = self._get_all_relations()\n return any(self._has_more_than_one_join_per_relation(rel, expected_relation) for rel in all_relations)\n\n def _has_more_than_one_join_per_relation(self, relation, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\"\"\"\n # threshold is the number of related objects that are acceptable\n # when determining if related objects exist. 
threshold is 0 for most\n # relationships. if the relationship is expected_relation, we know that\n # there is already exactly 1 acceptable relationship (the one we are\n # attempting to delete), so the threshold is 1\n threshold = 1 if relation == expected_relation else 0\n\n # Raise a KeyError if rel is not a defined field on the db_obj model\n # This will help catch any errors in relation passed.\n if relation not in [field.name for field in self._meta.get_fields()]:\n raise KeyError(f\"{relation} is not a defined field on the {self._meta.model_name} model.\")\n\n # if attr rel in db_obj is not None, then test if reference object(s) exist\n if getattr(self, relation) is not None:\n field = self._meta.get_field(relation)\n if isinstance(field, models.OneToOneField):\n # if the rel field is a OneToOne field, then we have already\n # determined that the object exists (is not None)\n # so return True unless the relation being tested is the expected_relation\n is_not_expected_relation = relation != expected_relation\n return is_not_expected_relation\n elif isinstance(field, models.ForeignObjectRel):\n # if the rel field is a ManyToOne or ManyToMany, then we need\n # to determine if the count of related objects is greater than\n # the threshold\n return getattr(self, relation).count() > threshold\n return False\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def has_contact_info(self):\n return bool(self.title or self.email or self.phone)\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n if self.user:\n updated = False\n\n # Update first name and last name if necessary\n if not self.user.first_name or not self.user.last_name:\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n updated = True\n\n # Update phone if necessary\n if not self.user.phone:\n self.user.phone = self.phone\n updated = True\n\n # Save user if any updates were made\n if updated:\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py"}, {"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.utils import timezone\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenIdConnectBackend(ModelBackend):\n \"\"\"\n This backend checks a previously performed OIDC authentication.\n If it is OK and the user already exists in the database, it is returned.\n If it is OK and user does not exist in the database, it is created and\n returned unless setting OIDC_CREATE_UNKNOWN_USER is False.\n In all other cases, None is returned.\n \"\"\"\n\n def authenticate(self, request, **kwargs):\n logger.debug(\"kwargs %s\" % kwargs)\n user = None\n if not kwargs or \"sub\" not in kwargs.keys():\n return user\n\n UserModel = get_user_model()\n username = self.clean_username(kwargs[\"sub\"])\n\n # Some OP may actually choose to withhold some information, so we must\n # test if it is present\n openid_data = {\"last_login\": timezone.now()}\n openid_data[\"first_name\"] = 
kwargs.get(\"given_name\", \"\")\n openid_data[\"last_name\"] = kwargs.get(\"family_name\", \"\")\n openid_data[\"email\"] = kwargs.get(\"email\", \"\")\n openid_data[\"phone\"] = kwargs.get(\"phone\", \"\")\n\n # Note that this could be accomplished in one try-except clause, but\n # instead we use get_or_create when creating unknown users since it has\n # built-in safeguards for multiple threads.\n if getattr(settings, \"OIDC_CREATE_UNKNOWN_USER\", True):\n args = {\n UserModel.USERNAME_FIELD: username,\n # defaults _will_ be updated, these are not fallbacks\n \"defaults\": openid_data,\n }\n\n user, created = UserModel.objects.get_or_create(**args)\n\n if not created:\n # If user exists, update existing user\n self.update_existing_user(user, args[\"defaults\"])\n else:\n # If user is created, configure the user\n user = self.configure_user(user, **kwargs)\n else:\n try:\n user = UserModel.objects.get_by_natural_key(username)\n except UserModel.DoesNotExist:\n return None\n # run this callback for a each login\n user.on_each_login()\n return user\n\n def update_existing_user(self, user, kwargs):\n \"\"\"\n Update user fields without overwriting certain fields.\n\n Args:\n user: User object to be updated.\n kwargs: Dictionary containing fields to update and their new values.\n\n Note:\n This method updates user fields while preserving the values of 'first_name',\n 'last_name', and 'phone' fields, unless specific conditions are met.\n\n - 'first_name', 'last_name' or 'phone' will be updated if the provided value is not empty.\n \"\"\"\n\n fields_to_check = [\"first_name\", \"last_name\", \"phone\"]\n\n # Iterate over fields to update\n for key, value in kwargs.items():\n # Check if the field is not 'first_name', 'last_name', or 'phone',\n # or if it's 'first_name' or 'last_name' or 'phone' and the provided value is not empty\n if key not in fields_to_check or (key in fields_to_check and value):\n # Update the corresponding attribute of the user object\n setattr(user, key, value)\n\n # Save the user object with the updated fields\n user.save()\n\n def clean_username(self, username):\n \"\"\"\n Performs any cleaning on the \"username\" prior to using it to get or\n create the user object. Returns the cleaned username.\n \"\"\"\n return username\n\n def configure_user(self, user, **kwargs):\n \"\"\"\n Configures a user after creation and returns the updated user.\n \"\"\"\n user.set_unusable_password()\n return user\n", "path": "src/djangooidc/backends.py"}]} | 2,548 | 684 |
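The patch above adds guards in two places: `update_existing_user()` now skips `first_name`, `last_name`, and `phone` when the incoming value is empty, and `Contact.save()` copies `phone` (alongside the name fields) onto the linked user only when the user's own value is blank. A generic illustration of that second guard follows; the helper name and plain attribute access are illustrative only, not code from the repository:

```python
def copy_if_blank(user, contact, fields=("first_name", "last_name", "phone")):
    """Copy each field from contact onto user only when user's value is blank.

    Returns True when at least one field changed, mirroring the `updated`
    flag used in Contact.save() in the patch above.
    """
    changed = False
    for name in fields:
        # Never overwrite data the user already has; only fill in gaps.
        if not getattr(user, name) and getattr(contact, name):
            setattr(user, name, getattr(contact, name))
            changed = True
    return changed
```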
gh_patches_debug_4590 | rasdani/github-patches | git_diff | cowrie__cowrie-994 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
free -m command results in stack trace and cowrie freezes
From within cowrie (docker current 7 Dec 2018), when the command "free -m" is run it results in the stack trace errors shown below. Cowrie freezes, and since many bots use free for info gathering, cowrie may miss everything subsequent to this command.
Environment:
Ubuntu 16.04
Python 3.5.2
docker pull cowrie/cowrie
sudo iptables -t nat -A PREROUTING -p tcp --dport 22 -j REDIRECT --to-port 2222
docker run -it -p 2222:2222 -p 2223:2223 cowrie/cowrie
Console Error:
2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0
.1] CMD: free -m
2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0
.1] Command found: free -m
2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0
.1] Unhandled Error
Traceback (most recent call last):
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithCon
text
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithCont
ext
return func(*args,**kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/service.py", line 45, in packetRec
eived
return f(packet)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/connection.py", line 249, in ssh_C
HANNEL_DATA
log.callWithLogger(channel, channel.dataReceived, data)
--- <exception caught here> ---
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithCon
text
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithCont
ext
return func(*args,**kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py", line 112, in dataRece
ived
self.client.transport.write(data)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py", line 163, in write
self.proto.dataReceived(data)
File "/cowrie/cowrie-git/src/cowrie/insults/insults.py", line 104, in dataReceived
insults.ServerProtocol.dataReceived(self, data)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/insults/insults.py", line 537, in data
Received
self.terminalProtocol.keystrokeReceived(ch, None)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py", line 225, in keystrokeRe
ceived
m()
File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 325, in handle_RETURN
return recvline.RecvLine.handle_RETURN(self)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py", line 292, in handle_RETU
RN
self.lineReceived(line)
File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 185, in lineReceived
self.cmdstack[-1].lineReceived(line)
File "/cowrie/cowrie-git/src/cowrie/shell/honeypot.py", line 106, in lineReceived
self.runCommand()
File "/cowrie/cowrie-git/src/cowrie/shell/honeypot.py", line 215, in runCommand
self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 306, in call_command
HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 194, in call_command
obj.start()
File "/cowrie/cowrie-git/src/cowrie/shell/command.py", line 128, in start
self.call()
File "/cowrie/cowrie-git/src/cowrie/commands/free.py", line 41, in call
self.do_free(fmt='megabytes')
File "/cowrie/cowrie-git/src/cowrie/commands/free.py", line 60, in do_free
for key, value in raw_mem_stats.iteritems():
builtins.AttributeError: 'dict' object has no attribute 'iteritems'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/commands/free.py`
Content:
```
1 # Copyright (c) 2015 Michel Oosterhof <[email protected]>
2 # All rights reserved.
3
4 """
5 This module ...
6 """
7
8 from __future__ import absolute_import, division
9
10 import getopt
11
12 from cowrie.shell.command import HoneyPotCommand
13
14 commands = {}
15
16 FREE_OUTPUT = """ total used free shared buff/cache available
17 Mem:{MemTotal:>15}{calc_total_used:>12}{MemFree:>12}{Shmem:>12}{calc_total_buffers_and_cache:>12}{MemAvailable:>12}
18 Swap:{SwapTotal:>14}{calc_swap_used:>12}{SwapFree:>12}
19 """
20
21
22 class command_free(HoneyPotCommand):
23 """
24 free
25 """
26
27 def call(self):
28 # Parse options or display no files
29 try:
30 opts, args = getopt.getopt(self.args, 'mh')
31 except getopt.GetoptError:
32 self.do_free()
33 return
34
35 # Parse options
36 for o, a in opts:
37 if o in ('-h'):
38 self.do_free(fmt='human')
39 return
40 elif o in ('-m'):
41 self.do_free(fmt='megabytes')
42 return
43 self.do_free()
44
45 def do_free(self, fmt='kilobytes'):
46 """
47 print free statistics
48 """
49
50 # Get real host memstats and add the calculated fields
51 raw_mem_stats = self.get_free_stats()
52 raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
53 raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (
54 raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache']
55 )
56 raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
57
58 if fmt == 'megabytes':
59 # Transform KB to MB
60 for key, value in raw_mem_stats.iteritems():
61 raw_mem_stats[key] = int(value / 1000)
62 elif fmt == 'human':
63 magnitude = ["B", "M", "G", "T", "Z"]
64 for key, value in raw_mem_stats.iteritems():
65 current_magnitude = 0
66
67 # Keep dividing until we get a sane magnitude
68 while(value >= 1000 and current_magnitude < len(magnitude)):
69 value = round(float(value / 1000), 1)
70 current_magnitude += 1
71
72 # Format to string and append value with new magnitude
73 raw_mem_stats[key] = str("{:g}{}".format(value, magnitude[current_magnitude]))
74
75 # Write the output to screen
76 self.write(FREE_OUTPUT.format(**raw_mem_stats))
77
78 def get_free_stats(self):
79 """
80 Get the free stats from /proc
81 """
82 needed_keys = ["Buffers", "Cached", "MemTotal", "MemFree", "SwapTotal", "SwapFree", "Shmem", "MemAvailable"]
83 mem_info_map = {}
84 with open('/proc/meminfo', 'r') as proc_file:
85 for line in proc_file:
86 tokens = line.split(':')
87
88 # Later we are going to do some math on those numbers, better not include uneeded keys for performance
89 if tokens[0] in needed_keys:
90 mem_info_map[tokens[0]] = int(tokens[1].lstrip().split(' ')[0])
91
92 # Got a map with all tokens from /proc/meminfo and sizes in KBs
93 return mem_info_map
94
95
96 commands['/usr/bin/free'] = command_free
97 commands['free'] = command_free
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cowrie/commands/free.py b/src/cowrie/commands/free.py
--- a/src/cowrie/commands/free.py
+++ b/src/cowrie/commands/free.py
@@ -57,7 +57,7 @@
if fmt == 'megabytes':
# Transform KB to MB
- for key, value in raw_mem_stats.iteritems():
+ for key, value in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
elif fmt == 'human':
magnitude = ["B", "M", "G", "T", "Z"]
| {"golden_diff": "diff --git a/src/cowrie/commands/free.py b/src/cowrie/commands/free.py\n--- a/src/cowrie/commands/free.py\n+++ b/src/cowrie/commands/free.py\n@@ -57,7 +57,7 @@\n \n if fmt == 'megabytes':\n # Transform KB to MB\n- for key, value in raw_mem_stats.iteritems():\n+ for key, value in raw_mem_stats.items():\n raw_mem_stats[key] = int(value / 1000)\n elif fmt == 'human':\n magnitude = [\"B\", \"M\", \"G\", \"T\", \"Z\"]\n", "issue": "free -m command results in stack trace and cowrie freezes\nFrom within cowrie (docker current 7 Dec 2018), when the command \"free -m\" is run it results a stack trace errors shown below. Cowrie freezes., and since many bots use free as info gathering, cowry may miss everything subsequent to this command.\r\n\r\nEnvironment:\r\nUbuntu 16.04\r\nPython 3.5.2\r\ndocker pull cowrie/cowrie\r\nsudo iptables -t nat -A PREROUTING -p tcp --dport 22 -j REDIRECT --to-port 2222\r\ndocker run -it -p 2222:2222 -p 2223:2223 cowrie/cowrie\r\n\r\nConsole Error:\r\n2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0\r\n.1] CMD: free -m\r\n2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0\r\n.1] Command found: free -m\r\n2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0\r\n.1] Unhandled Error\r\n Traceback (most recent call last):\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py\", line 122, in callWithCon\r\ntext\r\n return self.currentContext().callWithContext(ctx, func, *args, **kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py\", line 85, in callWithCont\r\next\r\n return func(*args,**kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/service.py\", line 45, in packetRec\r\neived\r\n return f(packet)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/connection.py\", line 249, in ssh_C\r\nHANNEL_DATA\r\n log.callWithLogger(channel, channel.dataReceived, data)\r\n --- <exception caught here> ---\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py\", line 103, in callWithLogger\r\n return callWithContext({\"system\": lp}, func, *args, **kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py\", line 86, in callWithContext\r\n return context.call({ILogContext: newCtx}, func, *args, **kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py\", line 122, in callWithCon\r\ntext\r\n return self.currentContext().callWithContext(ctx, func, *args, **kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py\", line 85, in callWithCont\r\next\r\n return func(*args,**kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py\", line 112, in dataRece\r\nived\r\n self.client.transport.write(data)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py\", line 163, in write\r\n self.proto.dataReceived(data)\r\n File \"/cowrie/cowrie-git/src/cowrie/insults/insults.py\", line 104, in dataReceived\r\n insults.ServerProtocol.dataReceived(self, data)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/insults/insults.py\", line 537, in data\r\nReceived\r\n self.terminalProtocol.keystrokeReceived(ch, None)\r\n File 
\"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py\", line 225, in keystrokeRe\r\nceived\r\n m()\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/protocol.py\", line 325, in handle_RETURN\r\n return recvline.RecvLine.handle_RETURN(self)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py\", line 292, in handle_RETU\r\nRN\r\n self.lineReceived(line)\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/protocol.py\", line 185, in lineReceived\r\n self.cmdstack[-1].lineReceived(line)\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/honeypot.py\", line 106, in lineReceived\r\n self.runCommand()\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/honeypot.py\", line 215, in runCommand\r\n self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/protocol.py\", line 306, in call_command\r\n HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/protocol.py\", line 194, in call_command\r\n obj.start()\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/command.py\", line 128, in start\r\n self.call()\r\n File \"/cowrie/cowrie-git/src/cowrie/commands/free.py\", line 41, in call\r\n self.do_free(fmt='megabytes')\r\n File \"/cowrie/cowrie-git/src/cowrie/commands/free.py\", line 60, in do_free\r\n for key, value in raw_mem_stats.iteritems():\r\n builtins.AttributeError: 'dict' object has no attribute 'iteritems'\r\n\n", "before_files": [{"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n\n\"\"\"\nThis module ...\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport getopt\n\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\nFREE_OUTPUT = \"\"\" total used free shared buff/cache available\nMem:{MemTotal:>15}{calc_total_used:>12}{MemFree:>12}{Shmem:>12}{calc_total_buffers_and_cache:>12}{MemAvailable:>12}\nSwap:{SwapTotal:>14}{calc_swap_used:>12}{SwapFree:>12}\n\"\"\"\n\n\nclass command_free(HoneyPotCommand):\n \"\"\"\n free\n \"\"\"\n\n def call(self):\n # Parse options or display no files\n try:\n opts, args = getopt.getopt(self.args, 'mh')\n except getopt.GetoptError:\n self.do_free()\n return\n\n # Parse options\n for o, a in opts:\n if o in ('-h'):\n self.do_free(fmt='human')\n return\n elif o in ('-m'):\n self.do_free(fmt='megabytes')\n return\n self.do_free()\n\n def do_free(self, fmt='kilobytes'):\n \"\"\"\n print free statistics\n \"\"\"\n\n # Get real host memstats and add the calculated fields\n raw_mem_stats = self.get_free_stats()\n raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']\n raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (\n raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache']\n )\n raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']\n\n if fmt == 'megabytes':\n # Transform KB to MB\n for key, value in raw_mem_stats.iteritems():\n raw_mem_stats[key] = int(value / 1000)\n elif fmt == 'human':\n magnitude = [\"B\", \"M\", \"G\", \"T\", \"Z\"]\n for key, value in raw_mem_stats.iteritems():\n current_magnitude = 0\n\n # Keep dividing until we get a sane magnitude\n while(value >= 1000 and current_magnitude < len(magnitude)):\n value = round(float(value / 1000), 1)\n current_magnitude += 1\n\n # Format to string and append value with new magnitude\n raw_mem_stats[key] = str(\"{:g}{}\".format(value, magnitude[current_magnitude]))\n\n # Write the output 
to screen\n self.write(FREE_OUTPUT.format(**raw_mem_stats))\n\n def get_free_stats(self):\n \"\"\"\n Get the free stats from /proc\n \"\"\"\n needed_keys = [\"Buffers\", \"Cached\", \"MemTotal\", \"MemFree\", \"SwapTotal\", \"SwapFree\", \"Shmem\", \"MemAvailable\"]\n mem_info_map = {}\n with open('/proc/meminfo', 'r') as proc_file:\n for line in proc_file:\n tokens = line.split(':')\n\n # Later we are going to do some math on those numbers, better not include uneeded keys for performance\n if tokens[0] in needed_keys:\n mem_info_map[tokens[0]] = int(tokens[1].lstrip().split(' ')[0])\n\n # Got a map with all tokens from /proc/meminfo and sizes in KBs\n return mem_info_map\n\n\ncommands['/usr/bin/free'] = command_free\ncommands['free'] = command_free\n", "path": "src/cowrie/commands/free.py"}], "after_files": [{"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n\n\"\"\"\nThis module ...\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport getopt\n\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\nFREE_OUTPUT = \"\"\" total used free shared buff/cache available\nMem:{MemTotal:>15}{calc_total_used:>12}{MemFree:>12}{Shmem:>12}{calc_total_buffers_and_cache:>12}{MemAvailable:>12}\nSwap:{SwapTotal:>14}{calc_swap_used:>12}{SwapFree:>12}\n\"\"\"\n\n\nclass command_free(HoneyPotCommand):\n \"\"\"\n free\n \"\"\"\n\n def call(self):\n # Parse options or display no files\n try:\n opts, args = getopt.getopt(self.args, 'mh')\n except getopt.GetoptError:\n self.do_free()\n return\n\n # Parse options\n for o, a in opts:\n if o in ('-h'):\n self.do_free(fmt='human')\n return\n elif o in ('-m'):\n self.do_free(fmt='megabytes')\n return\n self.do_free()\n\n def do_free(self, fmt='kilobytes'):\n \"\"\"\n print free statistics\n \"\"\"\n\n # Get real host memstats and add the calculated fields\n raw_mem_stats = self.get_free_stats()\n raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']\n raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (\n raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache']\n )\n raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']\n\n if fmt == 'megabytes':\n # Transform KB to MB\n for key, value in raw_mem_stats.items():\n raw_mem_stats[key] = int(value / 1000)\n elif fmt == 'human':\n magnitude = [\"B\", \"M\", \"G\", \"T\", \"Z\"]\n for key, value in raw_mem_stats.iteritems():\n current_magnitude = 0\n\n # Keep dividing until we get a sane magnitude\n while(value >= 1000 and current_magnitude < len(magnitude)):\n value = round(float(value / 1000), 1)\n current_magnitude += 1\n\n # Format to string and append value with new magnitude\n raw_mem_stats[key] = str(\"{:g}{}\".format(value, magnitude[current_magnitude]))\n\n # Write the output to screen\n self.write(FREE_OUTPUT.format(**raw_mem_stats))\n\n def get_free_stats(self):\n \"\"\"\n Get the free stats from /proc\n \"\"\"\n needed_keys = [\"Buffers\", \"Cached\", \"MemTotal\", \"MemFree\", \"SwapTotal\", \"SwapFree\", \"Shmem\", \"MemAvailable\"]\n mem_info_map = {}\n with open('/proc/meminfo', 'r') as proc_file:\n for line in proc_file:\n tokens = line.split(':')\n\n # Later we are going to do some math on those numbers, better not include uneeded keys for performance\n if tokens[0] in needed_keys:\n mem_info_map[tokens[0]] = int(tokens[1].lstrip().split(' ')[0])\n\n # Got a map with all tokens from /proc/meminfo and sizes in KBs\n return 
mem_info_map\n\n\ncommands['/usr/bin/free'] = command_free\ncommands['free'] = command_free\n", "path": "src/cowrie/commands/free.py"}]} | 2,663 | 137 |
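The golden diff above is a one-token change, replacing `iteritems()` with `items()`. For context, a minimal standalone sketch (hypothetical numbers, not taken from the cowrie codebase) of why the original call breaks: Python 3 removed `dict.iteritems()`, while `dict.items()` works on both Python 2 and 3:

```python
# Sketch only: illustrates the Python 2 -> 3 dict API change behind the issue above.
stats = {"MemTotal": 2048000, "MemFree": 512000}  # hypothetical /proc/meminfo values in KB

# On Python 3 the following would raise:
#   AttributeError: 'dict' object has no attribute 'iteritems'
# for key, value in stats.iteritems(): ...

for key, value in stats.items():    # portable replacement used by the fix
    stats[key] = int(value / 1000)  # KB -> MB, as in do_free(fmt='megabytes')

print(stats)  # {'MemTotal': 2048, 'MemFree': 512}
```

Note that the `fmt == 'human'` branch in the same file still calls `iteritems()` and is left unchanged by this patch.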
gh_patches_debug_65703 | rasdani/github-patches | git_diff | carpentries__amy-1793 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: assignment form queryset may return duplicate results
Introduced in v2.16, AssignmentForm contains a queryset that may yield duplicate results due to the filtering used.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/dashboard/forms.py`
Content:
```
1 from django import forms
2 from django.core.exceptions import ValidationError
3 from django.db.models import Q
4 from django_countries.fields import CountryField
5
6 from workshops.models import (
7 Language,
8 GenderMixin,
9 Person,
10 TrainingProgress,
11 TrainingRequirement,
12 )
13
14 from workshops.forms import BootstrapHelper
15 # this is used instead of Django Autocomplete Light widgets
16 # see issue #1330: https://github.com/swcarpentry/amy/issues/1330
17 from workshops.fields import (
18 Select2Widget,
19 ModelSelect2MultipleWidget,
20 RadioSelectWithOther,
21 )
22
23
24 class AssignmentForm(forms.Form):
25 assigned_to = forms.ModelChoiceField(
26 label="Assigned to:",
27 required=False,
28 queryset=Person.objects.filter(
29 Q(is_superuser=True) | Q(groups__name="administrators")
30 ),
31 widget=Select2Widget(),
32 )
33 helper = BootstrapHelper(
34 add_submit_button=False,
35 add_cancel_button=False,
36 wider_labels=True,
37 use_get_method=True,
38 form_id="assignment-form"
39 )
40
41
42 class AutoUpdateProfileForm(forms.ModelForm):
43 username = forms.CharField(disabled=True, required=False)
44 email = forms.CharField(
45 disabled=True, required=False,
46 label=Person._meta.get_field('email').verbose_name,
47 help_text=Person._meta.get_field('email').help_text,
48 )
49 github = forms.CharField(
50 disabled=True, required=False,
51 help_text='If you want to change your github username, please email '
52 'us at <a href="mailto:[email protected]">'
53 '[email protected]</a>.')
54
55 country = CountryField().formfield(
56 required=False,
57 help_text='Your country of residence.',
58 widget=Select2Widget,
59 )
60
61 languages = forms.ModelMultipleChoiceField(
62 label='Languages',
63 required=False,
64 queryset=Language.objects.all(),
65 widget=ModelSelect2MultipleWidget(data_view='language-lookup')
66 )
67
68 helper = BootstrapHelper(add_cancel_button=False)
69
70 class Meta:
71 model = Person
72 fields = [
73 'personal',
74 'middle',
75 'family',
76 'email',
77 'secondary_email',
78 'gender',
79 'gender_other',
80 'may_contact',
81 'publish_profile',
82 'lesson_publication_consent',
83 'country',
84 'airport',
85 'github',
86 'twitter',
87 'url',
88 'username',
89 'affiliation',
90 'domains',
91 'lessons',
92 'languages',
93 'occupation',
94 'orcid',
95 ]
96 readonly_fields = (
97 'username',
98 'github',
99 )
100 widgets = {
101 'gender': RadioSelectWithOther('gender_other'),
102 'domains': forms.CheckboxSelectMultiple(),
103 'lessons': forms.CheckboxSelectMultiple(),
104 'airport': Select2Widget,
105 }
106
107 def __init__(self, *args, **kwargs):
108 super().__init__(*args, **kwargs)
109
110 # set up a layout object for the helper
111 self.helper.layout = self.helper.build_default_layout(self)
112
113 # set up `*WithOther` widgets so that they can display additional
114 # fields inline
115 self['gender'].field.widget.other_field = self['gender_other']
116
117 # remove additional fields
118 self.helper.layout.fields.remove('gender_other')
119
120 def clean(self):
121 super().clean()
122 errors = dict()
123
124 # 1: require "other gender" field if "other" was selected in
125 # "gender" field
126 gender = self.cleaned_data.get('gender', '')
127 gender_other = self.cleaned_data.get('gender_other', '')
128 if gender == GenderMixin.OTHER and not gender_other:
129 errors['gender'] = ValidationError("This field is required.")
130 elif gender != GenderMixin.OTHER and gender_other:
131 errors['gender'] = ValidationError(
132 'If you entered data in "Other" field, please select that '
133 "option.")
134
135 # raise errors if any present
136 if errors:
137 raise ValidationError(errors)
138
139
140 class SendHomeworkForm(forms.ModelForm):
141 url = forms.URLField(label='URL')
142 requirement = forms.ModelChoiceField(
143 queryset=TrainingRequirement.objects.filter(name__endswith="Homework"),
144 label="Type", required=True,
145 )
146
147 helper = BootstrapHelper(add_cancel_button=False)
148
149 class Meta:
150 model = TrainingProgress
151 fields = [
152 'requirement',
153 'url',
154 ]
155
156
157 class SearchForm(forms.Form):
158 """Represent general searching form."""
159
160 term = forms.CharField(label="Term", max_length=100)
161 no_redirect = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)
162 helper = BootstrapHelper(add_cancel_button=False, use_get_method=True)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/amy/dashboard/forms.py b/amy/dashboard/forms.py
--- a/amy/dashboard/forms.py
+++ b/amy/dashboard/forms.py
@@ -27,7 +27,7 @@
required=False,
queryset=Person.objects.filter(
Q(is_superuser=True) | Q(groups__name="administrators")
- ),
+ ).distinct(),
widget=Select2Widget(),
)
helper = BootstrapHelper(
| {"golden_diff": "diff --git a/amy/dashboard/forms.py b/amy/dashboard/forms.py\n--- a/amy/dashboard/forms.py\n+++ b/amy/dashboard/forms.py\n@@ -27,7 +27,7 @@\n required=False,\n queryset=Person.objects.filter(\n Q(is_superuser=True) | Q(groups__name=\"administrators\")\n- ),\n+ ).distinct(),\n widget=Select2Widget(),\n )\n helper = BootstrapHelper(\n", "issue": "Bug: assignment form queryset may return duplicate results\nIntroduced in v2.16, AssignmentForm contains a queryset that may yield duplicate results due to the filtering used.\n", "before_files": [{"content": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django_countries.fields import CountryField\n\nfrom workshops.models import (\n Language,\n GenderMixin,\n Person,\n TrainingProgress,\n TrainingRequirement,\n)\n\nfrom workshops.forms import BootstrapHelper\n# this is used instead of Django Autocomplete Light widgets\n# see issue #1330: https://github.com/swcarpentry/amy/issues/1330\nfrom workshops.fields import (\n Select2Widget,\n ModelSelect2MultipleWidget,\n RadioSelectWithOther,\n)\n\n\nclass AssignmentForm(forms.Form):\n assigned_to = forms.ModelChoiceField(\n label=\"Assigned to:\",\n required=False,\n queryset=Person.objects.filter(\n Q(is_superuser=True) | Q(groups__name=\"administrators\")\n ),\n widget=Select2Widget(),\n )\n helper = BootstrapHelper(\n add_submit_button=False,\n add_cancel_button=False,\n wider_labels=True,\n use_get_method=True,\n form_id=\"assignment-form\"\n )\n\n\nclass AutoUpdateProfileForm(forms.ModelForm):\n username = forms.CharField(disabled=True, required=False)\n email = forms.CharField(\n disabled=True, required=False,\n label=Person._meta.get_field('email').verbose_name,\n help_text=Person._meta.get_field('email').help_text,\n )\n github = forms.CharField(\n disabled=True, required=False,\n help_text='If you want to change your github username, please email '\n 'us at <a href=\"mailto:[email protected]\">'\n '[email protected]</a>.')\n\n country = CountryField().formfield(\n required=False,\n help_text='Your country of residence.',\n widget=Select2Widget,\n )\n\n languages = forms.ModelMultipleChoiceField(\n label='Languages',\n required=False,\n queryset=Language.objects.all(),\n widget=ModelSelect2MultipleWidget(data_view='language-lookup')\n )\n\n helper = BootstrapHelper(add_cancel_button=False)\n\n class Meta:\n model = Person\n fields = [\n 'personal',\n 'middle',\n 'family',\n 'email',\n 'secondary_email',\n 'gender',\n 'gender_other',\n 'may_contact',\n 'publish_profile',\n 'lesson_publication_consent',\n 'country',\n 'airport',\n 'github',\n 'twitter',\n 'url',\n 'username',\n 'affiliation',\n 'domains',\n 'lessons',\n 'languages',\n 'occupation',\n 'orcid',\n ]\n readonly_fields = (\n 'username',\n 'github',\n )\n widgets = {\n 'gender': RadioSelectWithOther('gender_other'),\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n 'airport': Select2Widget,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # set up a layout object for the helper\n self.helper.layout = self.helper.build_default_layout(self)\n\n # set up `*WithOther` widgets so that they can display additional\n # fields inline\n self['gender'].field.widget.other_field = self['gender_other']\n\n # remove additional fields\n self.helper.layout.fields.remove('gender_other')\n\n def clean(self):\n super().clean()\n errors = dict()\n\n # 1: require \"other gender\" field if 
\"other\" was selected in\n # \"gender\" field\n gender = self.cleaned_data.get('gender', '')\n gender_other = self.cleaned_data.get('gender_other', '')\n if gender == GenderMixin.OTHER and not gender_other:\n errors['gender'] = ValidationError(\"This field is required.\")\n elif gender != GenderMixin.OTHER and gender_other:\n errors['gender'] = ValidationError(\n 'If you entered data in \"Other\" field, please select that '\n \"option.\")\n\n # raise errors if any present\n if errors:\n raise ValidationError(errors)\n\n\nclass SendHomeworkForm(forms.ModelForm):\n url = forms.URLField(label='URL')\n requirement = forms.ModelChoiceField(\n queryset=TrainingRequirement.objects.filter(name__endswith=\"Homework\"),\n label=\"Type\", required=True,\n )\n\n helper = BootstrapHelper(add_cancel_button=False)\n\n class Meta:\n model = TrainingProgress\n fields = [\n 'requirement',\n 'url',\n ]\n\n\nclass SearchForm(forms.Form):\n \"\"\"Represent general searching form.\"\"\"\n\n term = forms.CharField(label=\"Term\", max_length=100)\n no_redirect = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)\n helper = BootstrapHelper(add_cancel_button=False, use_get_method=True)\n", "path": "amy/dashboard/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django_countries.fields import CountryField\n\nfrom workshops.models import (\n Language,\n GenderMixin,\n Person,\n TrainingProgress,\n TrainingRequirement,\n)\n\nfrom workshops.forms import BootstrapHelper\n# this is used instead of Django Autocomplete Light widgets\n# see issue #1330: https://github.com/swcarpentry/amy/issues/1330\nfrom workshops.fields import (\n Select2Widget,\n ModelSelect2MultipleWidget,\n RadioSelectWithOther,\n)\n\n\nclass AssignmentForm(forms.Form):\n assigned_to = forms.ModelChoiceField(\n label=\"Assigned to:\",\n required=False,\n queryset=Person.objects.filter(\n Q(is_superuser=True) | Q(groups__name=\"administrators\")\n ).distinct(),\n widget=Select2Widget(),\n )\n helper = BootstrapHelper(\n add_submit_button=False,\n add_cancel_button=False,\n wider_labels=True,\n use_get_method=True,\n form_id=\"assignment-form\"\n )\n\n\nclass AutoUpdateProfileForm(forms.ModelForm):\n username = forms.CharField(disabled=True, required=False)\n email = forms.CharField(\n disabled=True, required=False,\n label=Person._meta.get_field('email').verbose_name,\n help_text=Person._meta.get_field('email').help_text,\n )\n github = forms.CharField(\n disabled=True, required=False,\n help_text='If you want to change your github username, please email '\n 'us at <a href=\"mailto:[email protected]\">'\n '[email protected]</a>.')\n\n country = CountryField().formfield(\n required=False,\n help_text='Your country of residence.',\n widget=Select2Widget,\n )\n\n languages = forms.ModelMultipleChoiceField(\n label='Languages',\n required=False,\n queryset=Language.objects.all(),\n widget=ModelSelect2MultipleWidget(data_view='language-lookup')\n )\n\n helper = BootstrapHelper(add_cancel_button=False)\n\n class Meta:\n model = Person\n fields = [\n 'personal',\n 'middle',\n 'family',\n 'email',\n 'secondary_email',\n 'gender',\n 'gender_other',\n 'may_contact',\n 'publish_profile',\n 'lesson_publication_consent',\n 'country',\n 'airport',\n 'github',\n 'twitter',\n 'url',\n 'username',\n 'affiliation',\n 'domains',\n 'lessons',\n 'languages',\n 'occupation',\n 'orcid',\n ]\n readonly_fields = (\n 'username',\n 'github',\n )\n 
widgets = {\n 'gender': RadioSelectWithOther('gender_other'),\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n 'airport': Select2Widget,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # set up a layout object for the helper\n self.helper.layout = self.helper.build_default_layout(self)\n\n # set up `*WithOther` widgets so that they can display additional\n # fields inline\n self['gender'].field.widget.other_field = self['gender_other']\n\n # remove additional fields\n self.helper.layout.fields.remove('gender_other')\n\n def clean(self):\n super().clean()\n errors = dict()\n\n # 1: require \"other gender\" field if \"other\" was selected in\n # \"gender\" field\n gender = self.cleaned_data.get('gender', '')\n gender_other = self.cleaned_data.get('gender_other', '')\n if gender == GenderMixin.OTHER and not gender_other:\n errors['gender'] = ValidationError(\"This field is required.\")\n elif gender != GenderMixin.OTHER and gender_other:\n errors['gender'] = ValidationError(\n 'If you entered data in \"Other\" field, please select that '\n \"option.\")\n\n # raise errors if any present\n if errors:\n raise ValidationError(errors)\n\n\nclass SendHomeworkForm(forms.ModelForm):\n url = forms.URLField(label='URL')\n requirement = forms.ModelChoiceField(\n queryset=TrainingRequirement.objects.filter(name__endswith=\"Homework\"),\n label=\"Type\", required=True,\n )\n\n helper = BootstrapHelper(add_cancel_button=False)\n\n class Meta:\n model = TrainingProgress\n fields = [\n 'requirement',\n 'url',\n ]\n\n\nclass SearchForm(forms.Form):\n \"\"\"Represent general searching form.\"\"\"\n\n term = forms.CharField(label=\"Term\", max_length=100)\n no_redirect = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)\n helper = BootstrapHelper(add_cancel_button=False, use_get_method=True)\n", "path": "amy/dashboard/forms.py"}]} | 1,686 | 93 |
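The one-line fix above appends `.distinct()` to the queryset. The duplicates come from how Django evaluates an OR across the multi-valued `groups` relation: the query joins each person to their group memberships, so a superuser who also belongs to a matching group is emitted once per joined row. A minimal sketch of the pattern, assuming the same `Person` model already imported in the form module (variable names are illustrative):

```python
from django.db.models import Q
from workshops.models import Person

# Joining across the many-to-many `groups` relation can yield one row per
# membership, so the same person may appear more than once in the result.
duplicated = Person.objects.filter(
    Q(is_superuser=True) | Q(groups__name="administrators")
)

# .distinct() collapses those repeated rows back to one row per person.
assignable = duplicated.distinct()
```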
gh_patches_debug_201 | rasdani/github-patches | git_diff | blaze__blaze-475 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make blaze.test() return True or False
@asmeurer suggests this. Currently we're passing through pytest.main() which is like the error code from command line programs.
<!---
@huboard:{"order":398.859375,"milestone_order":452,"custom_state":""}
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `blaze/__init__.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import logging
4
5 from dynd import nd
6 from pandas import DataFrame
7 import h5py
8
9 from multipledispatch import halt_ordering, restart_ordering
10
11 halt_ordering() # Turn off multipledispatch ordering
12
13 from .expr import *
14 from .expr.functions import *
15 from .api import *
16 from .data.csv import *
17 from .data.json import *
18 from .data.hdf5 import *
19 from .compute.python import *
20 from .data.meta import *
21 from .compute.pandas import *
22 from .compute.numpy import *
23 from .compute.core import *
24 from .compute.core import compute
25 from .sql import *
26
27 try:
28 from .spark import *
29 except ImportError:
30 pass
31 try:
32 from .compute.pytables import *
33 except ImportError:
34 pass
35 try:
36 from .compute.chunks import *
37 except ImportError:
38 pass
39 try:
40 from .bcolz import *
41 except ImportError:
42 pass
43 try:
44 from .mongo import *
45 except ImportError:
46 pass
47
48 restart_ordering() # Restart multipledispatch ordering and do ordering
49
50 logging.basicConfig()
51 logger = logging.getLogger(__name__)
52 logger.setLevel(logging.WARNING)
53
54
55 inf = float('inf')
56 nan = float('nan')
57
58 __version__ = '0.6.1'
59
60 # If IPython is already loaded, register the Blaze catalog magic
61 # from . import catalog
62 # import sys
63 # if 'IPython' in sys.modules:
64 # catalog.register_ipy_magic()
65 # del sys
66
67 def print_versions():
68 """Print all the versions of software that Blaze relies on."""
69 import sys, platform
70 import numpy as np
71 import dynd
72 import datashape
73 print("-=" * 38)
74 print("Blaze version: %s" % __version__)
75 print("Datashape version: %s" % datashape.__version__)
76 print("NumPy version: %s" % np.__version__)
77 print("DyND version: %s / LibDyND %s" %
78 (dynd.__version__, dynd.__libdynd_version__))
79 print("Python version: %s" % sys.version)
80 (sysname, nodename, release, version, machine, processor) = \
81 platform.uname()
82 print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
83 if sysname == "Linux":
84 print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
85 if not processor:
86 processor = "not recognized"
87 print("Processor: %s" % processor)
88 print("Byte-ordering: %s" % sys.byteorder)
89 print("-=" * 38)
90
91
92 def test(verbose=False, junitfile=None, exit=False):
93 """
94 Runs the full Blaze test suite, outputting
95 the results of the tests to sys.stdout.
96
97 This uses py.test to discover which tests to
98 run, and runs tests in any 'tests' subdirectory
99 within the Blaze module.
100
101 Parameters
102 ----------
103 verbose : int, optional
104 Value 0 prints very little, 1 prints a little bit,
105 and 2 prints the test names while testing.
106 junitfile : string, optional
107 If provided, writes the test results to an junit xml
108 style xml file. This is useful for running the tests
109 in a CI server such as Jenkins.
110 exit : bool, optional
111 If True, the function will call sys.exit with an
112 error code after the tests are finished.
113 """
114 import os
115 import sys
116 import pytest
117
118 args = []
119
120 if verbose:
121 args.append('--verbose')
122
123 # Output an xunit file if requested
124 if junitfile is not None:
125 args.append('--junit-xml=%s' % junitfile)
126
127 # Add all 'tests' subdirectories to the options
128 rootdir = os.path.dirname(__file__)
129 for root, dirs, files in os.walk(rootdir):
130 if 'tests' in dirs:
131 testsdir = os.path.join(root, 'tests')
132 args.append(testsdir)
133 print('Test dir: %s' % testsdir[len(rootdir) + 1:])
134 # print versions (handy when reporting problems)
135 print_versions()
136 sys.stdout.flush()
137
138 # Ask pytest to do its thing
139 error_code = pytest.main(args=args)
140 if exit:
141 return sys.exit(error_code)
142 return error_code
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/blaze/__init__.py b/blaze/__init__.py
--- a/blaze/__init__.py
+++ b/blaze/__init__.py
@@ -139,4 +139,4 @@
error_code = pytest.main(args=args)
if exit:
return sys.exit(error_code)
- return error_code
+ return error_code == 0
| {"golden_diff": "diff --git a/blaze/__init__.py b/blaze/__init__.py\n--- a/blaze/__init__.py\n+++ b/blaze/__init__.py\n@@ -139,4 +139,4 @@\n error_code = pytest.main(args=args)\n if exit:\n return sys.exit(error_code)\n- return error_code\n+ return error_code == 0\n", "issue": "Make blaze.test() return True or False\n@asmeurer suggests this. Currently we're passing through pytest.main() which is like the error code from command line programs.\n\n<!---\n@huboard:{\"order\":398.859375,\"milestone_order\":452,\"custom_state\":\"\"}\n-->\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport logging\n\nfrom dynd import nd\nfrom pandas import DataFrame\nimport h5py\n\nfrom multipledispatch import halt_ordering, restart_ordering\n\nhalt_ordering() # Turn off multipledispatch ordering\n\nfrom .expr import *\nfrom .expr.functions import *\nfrom .api import *\nfrom .data.csv import *\nfrom .data.json import *\nfrom .data.hdf5 import *\nfrom .compute.python import *\nfrom .data.meta import *\nfrom .compute.pandas import *\nfrom .compute.numpy import *\nfrom .compute.core import *\nfrom .compute.core import compute\nfrom .sql import *\n\ntry:\n from .spark import *\nexcept ImportError:\n pass\ntry:\n from .compute.pytables import *\nexcept ImportError:\n pass\ntry:\n from .compute.chunks import *\nexcept ImportError:\n pass\ntry:\n from .bcolz import *\nexcept ImportError:\n pass\ntry:\n from .mongo import *\nexcept ImportError:\n pass\n\nrestart_ordering() # Restart multipledispatch ordering and do ordering\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\n\n\ninf = float('inf')\nnan = float('nan')\n\n__version__ = '0.6.1'\n\n# If IPython is already loaded, register the Blaze catalog magic\n# from . import catalog\n# import sys\n# if 'IPython' in sys.modules:\n# catalog.register_ipy_magic()\n# del sys\n\ndef print_versions():\n \"\"\"Print all the versions of software that Blaze relies on.\"\"\"\n import sys, platform\n import numpy as np\n import dynd\n import datashape\n print(\"-=\" * 38)\n print(\"Blaze version: %s\" % __version__)\n print(\"Datashape version: %s\" % datashape.__version__)\n print(\"NumPy version: %s\" % np.__version__)\n print(\"DyND version: %s / LibDyND %s\" %\n (dynd.__version__, dynd.__libdynd_version__))\n print(\"Python version: %s\" % sys.version)\n (sysname, nodename, release, version, machine, processor) = \\\n platform.uname()\n print(\"Platform: %s-%s-%s (%s)\" % (sysname, release, machine, version))\n if sysname == \"Linux\":\n print(\"Linux dist: %s\" % \" \".join(platform.linux_distribution()[:-1]))\n if not processor:\n processor = \"not recognized\"\n print(\"Processor: %s\" % processor)\n print(\"Byte-ordering: %s\" % sys.byteorder)\n print(\"-=\" * 38)\n\n\ndef test(verbose=False, junitfile=None, exit=False):\n \"\"\"\n Runs the full Blaze test suite, outputting\n the results of the tests to sys.stdout.\n\n This uses py.test to discover which tests to\n run, and runs tests in any 'tests' subdirectory\n within the Blaze module.\n\n Parameters\n ----------\n verbose : int, optional\n Value 0 prints very little, 1 prints a little bit,\n and 2 prints the test names while testing.\n junitfile : string, optional\n If provided, writes the test results to an junit xml\n style xml file. 
This is useful for running the tests\n in a CI server such as Jenkins.\n exit : bool, optional\n If True, the function will call sys.exit with an\n error code after the tests are finished.\n \"\"\"\n import os\n import sys\n import pytest\n\n args = []\n\n if verbose:\n args.append('--verbose')\n\n # Output an xunit file if requested\n if junitfile is not None:\n args.append('--junit-xml=%s' % junitfile)\n\n # Add all 'tests' subdirectories to the options\n rootdir = os.path.dirname(__file__)\n for root, dirs, files in os.walk(rootdir):\n if 'tests' in dirs:\n testsdir = os.path.join(root, 'tests')\n args.append(testsdir)\n print('Test dir: %s' % testsdir[len(rootdir) + 1:])\n # print versions (handy when reporting problems)\n print_versions()\n sys.stdout.flush()\n\n # Ask pytest to do its thing\n error_code = pytest.main(args=args)\n if exit:\n return sys.exit(error_code)\n return error_code\n", "path": "blaze/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport logging\n\nfrom dynd import nd\nfrom pandas import DataFrame\nimport h5py\n\nfrom multipledispatch import halt_ordering, restart_ordering\n\nhalt_ordering() # Turn off multipledispatch ordering\n\nfrom .expr import *\nfrom .expr.functions import *\nfrom .api import *\nfrom .data.csv import *\nfrom .data.json import *\nfrom .data.hdf5 import *\nfrom .compute.python import *\nfrom .data.meta import *\nfrom .compute.pandas import *\nfrom .compute.numpy import *\nfrom .compute.core import *\nfrom .compute.core import compute\nfrom .sql import *\n\ntry:\n from .spark import *\nexcept ImportError:\n pass\ntry:\n from .compute.pytables import *\nexcept ImportError:\n pass\ntry:\n from .compute.chunks import *\nexcept ImportError:\n pass\ntry:\n from .bcolz import *\nexcept ImportError:\n pass\ntry:\n from .mongo import *\nexcept ImportError:\n pass\n\nrestart_ordering() # Restart multipledispatch ordering and do ordering\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\n\n\ninf = float('inf')\nnan = float('nan')\n\n__version__ = '0.6.1'\n\n# If IPython is already loaded, register the Blaze catalog magic\n# from . 
import catalog\n# import sys\n# if 'IPython' in sys.modules:\n# catalog.register_ipy_magic()\n# del sys\n\ndef print_versions():\n \"\"\"Print all the versions of software that Blaze relies on.\"\"\"\n import sys, platform\n import numpy as np\n import dynd\n import datashape\n print(\"-=\" * 38)\n print(\"Blaze version: %s\" % __version__)\n print(\"Datashape version: %s\" % datashape.__version__)\n print(\"NumPy version: %s\" % np.__version__)\n print(\"DyND version: %s / LibDyND %s\" %\n (dynd.__version__, dynd.__libdynd_version__))\n print(\"Python version: %s\" % sys.version)\n (sysname, nodename, release, version, machine, processor) = \\\n platform.uname()\n print(\"Platform: %s-%s-%s (%s)\" % (sysname, release, machine, version))\n if sysname == \"Linux\":\n print(\"Linux dist: %s\" % \" \".join(platform.linux_distribution()[:-1]))\n if not processor:\n processor = \"not recognized\"\n print(\"Processor: %s\" % processor)\n print(\"Byte-ordering: %s\" % sys.byteorder)\n print(\"-=\" * 38)\n\n\ndef test(verbose=False, junitfile=None, exit=False):\n \"\"\"\n Runs the full Blaze test suite, outputting\n the results of the tests to sys.stdout.\n\n This uses py.test to discover which tests to\n run, and runs tests in any 'tests' subdirectory\n within the Blaze module.\n\n Parameters\n ----------\n verbose : int, optional\n Value 0 prints very little, 1 prints a little bit,\n and 2 prints the test names while testing.\n junitfile : string, optional\n If provided, writes the test results to an junit xml\n style xml file. This is useful for running the tests\n in a CI server such as Jenkins.\n exit : bool, optional\n If True, the function will call sys.exit with an\n error code after the tests are finished.\n \"\"\"\n import os\n import sys\n import pytest\n\n args = []\n\n if verbose:\n args.append('--verbose')\n\n # Output an xunit file if requested\n if junitfile is not None:\n args.append('--junit-xml=%s' % junitfile)\n\n # Add all 'tests' subdirectories to the options\n rootdir = os.path.dirname(__file__)\n for root, dirs, files in os.walk(rootdir):\n if 'tests' in dirs:\n testsdir = os.path.join(root, 'tests')\n args.append(testsdir)\n print('Test dir: %s' % testsdir[len(rootdir) + 1:])\n # print versions (handy when reporting problems)\n print_versions()\n sys.stdout.flush()\n\n # Ask pytest to do its thing\n error_code = pytest.main(args=args)\n if exit:\n return sys.exit(error_code)\n return error_code == 0\n", "path": "blaze/__init__.py"}]} | 1,626 | 85 |
gh_patches_debug_30662 | rasdani/github-patches | git_diff | goauthentik__authentik-4428 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
System Tasks: Show start timestamp and calculate Duration
**Is your feature request related to a problem? Please describe.**
For debugging purposes, I need the info when a task started, and when it finished.
**Describe the solution you'd like**
I have seen that the TaskInfo object actually holds that information, but it is not returned by the API, and not shown in the "SystemTasks" table of the web UI.
It would also make sense to calculate the duration for easier debugging.
**Describe alternatives you've considered**
I could look this up in the database, but this would be questionable UX, since there is already a view in the web app which should show this information.
**Additional context**
(none)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/admin/api/tasks.py`
Content:
```
1 """Tasks API"""
2 from importlib import import_module
3
4 from django.contrib import messages
5 from django.http.response import Http404
6 from django.utils.translation import gettext_lazy as _
7 from drf_spectacular.types import OpenApiTypes
8 from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema
9 from rest_framework.decorators import action
10 from rest_framework.fields import CharField, ChoiceField, DateTimeField, ListField
11 from rest_framework.permissions import IsAdminUser
12 from rest_framework.request import Request
13 from rest_framework.response import Response
14 from rest_framework.viewsets import ViewSet
15 from structlog.stdlib import get_logger
16
17 from authentik.core.api.utils import PassiveSerializer
18 from authentik.events.monitored_tasks import TaskInfo, TaskResultStatus
19
20 LOGGER = get_logger()
21
22
23 class TaskSerializer(PassiveSerializer):
24 """Serialize TaskInfo and TaskResult"""
25
26 task_name = CharField()
27 task_description = CharField()
28 task_finish_timestamp = DateTimeField(source="finish_time")
29
30 status = ChoiceField(
31 source="result.status.name",
32 choices=[(x.name, x.name) for x in TaskResultStatus],
33 )
34 messages = ListField(source="result.messages")
35
36 def to_representation(self, instance):
37 """When a new version of authentik adds fields to TaskInfo,
38 the API will fail with an AttributeError, as the classes
39 are pickled in cache. In that case, just delete the info"""
40 try:
41 return super().to_representation(instance)
42 except AttributeError: # pragma: no cover
43 if isinstance(self.instance, list):
44 for inst in self.instance:
45 inst.delete()
46 else:
47 self.instance.delete()
48 return {}
49
50
51 class TaskViewSet(ViewSet):
52 """Read-only view set that returns all background tasks"""
53
54 permission_classes = [IsAdminUser]
55 serializer_class = TaskSerializer
56
57 @extend_schema(
58 responses={
59 200: TaskSerializer(many=False),
60 404: OpenApiResponse(description="Task not found"),
61 },
62 parameters=[
63 OpenApiParameter(
64 "id",
65 type=OpenApiTypes.STR,
66 location=OpenApiParameter.PATH,
67 required=True,
68 ),
69 ],
70 )
71 # pylint: disable=invalid-name
72 def retrieve(self, request: Request, pk=None) -> Response:
73 """Get a single system task"""
74 task = TaskInfo.by_name(pk)
75 if not task:
76 raise Http404
77 return Response(TaskSerializer(task, many=False).data)
78
79 @extend_schema(responses={200: TaskSerializer(many=True)})
80 def list(self, request: Request) -> Response:
81 """List system tasks"""
82 tasks = sorted(TaskInfo.all().values(), key=lambda task: task.task_name)
83 return Response(TaskSerializer(tasks, many=True).data)
84
85 @extend_schema(
86 request=OpenApiTypes.NONE,
87 responses={
88 204: OpenApiResponse(description="Task retried successfully"),
89 404: OpenApiResponse(description="Task not found"),
90 500: OpenApiResponse(description="Failed to retry task"),
91 },
92 parameters=[
93 OpenApiParameter(
94 "id",
95 type=OpenApiTypes.STR,
96 location=OpenApiParameter.PATH,
97 required=True,
98 ),
99 ],
100 )
101 @action(detail=True, methods=["post"])
102 # pylint: disable=invalid-name
103 def retry(self, request: Request, pk=None) -> Response:
104 """Retry task"""
105 task = TaskInfo.by_name(pk)
106 if not task:
107 raise Http404
108 try:
109 task_module = import_module(task.task_call_module)
110 task_func = getattr(task_module, task.task_call_func)
111 LOGGER.debug("Running task", task=task_func)
112 task_func.delay(*task.task_call_args, **task.task_call_kwargs)
113 messages.success(
114 self.request,
115 _("Successfully re-scheduled Task %(name)s!" % {"name": task.task_name}),
116 )
117 return Response(status=204)
118 except (ImportError, AttributeError): # pragma: no cover
119 LOGGER.warning("Failed to run task, remove state", task=task)
120 # if we get an import error, the module path has probably changed
121 task.delete()
122 return Response(status=500)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/admin/api/tasks.py b/authentik/admin/api/tasks.py
--- a/authentik/admin/api/tasks.py
+++ b/authentik/admin/api/tasks.py
@@ -7,7 +7,13 @@
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema
from rest_framework.decorators import action
-from rest_framework.fields import CharField, ChoiceField, DateTimeField, ListField
+from rest_framework.fields import (
+ CharField,
+ ChoiceField,
+ DateTimeField,
+ ListField,
+ SerializerMethodField,
+)
from rest_framework.permissions import IsAdminUser
from rest_framework.request import Request
from rest_framework.response import Response
@@ -26,6 +32,7 @@
task_name = CharField()
task_description = CharField()
task_finish_timestamp = DateTimeField(source="finish_time")
+ task_duration = SerializerMethodField()
status = ChoiceField(
source="result.status.name",
@@ -33,7 +40,11 @@
)
messages = ListField(source="result.messages")
- def to_representation(self, instance):
+ def get_task_duration(self, instance: TaskInfo) -> int:
+ """Get the duration a task took to run"""
+ return max(instance.finish_timestamp - instance.start_timestamp, 0)
+
+ def to_representation(self, instance: TaskInfo):
"""When a new version of authentik adds fields to TaskInfo,
the API will fail with an AttributeError, as the classes
are pickled in cache. In that case, just delete the info"""
| {"golden_diff": "diff --git a/authentik/admin/api/tasks.py b/authentik/admin/api/tasks.py\n--- a/authentik/admin/api/tasks.py\n+++ b/authentik/admin/api/tasks.py\n@@ -7,7 +7,13 @@\n from drf_spectacular.types import OpenApiTypes\n from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema\n from rest_framework.decorators import action\n-from rest_framework.fields import CharField, ChoiceField, DateTimeField, ListField\n+from rest_framework.fields import (\n+ CharField,\n+ ChoiceField,\n+ DateTimeField,\n+ ListField,\n+ SerializerMethodField,\n+)\n from rest_framework.permissions import IsAdminUser\n from rest_framework.request import Request\n from rest_framework.response import Response\n@@ -26,6 +32,7 @@\n task_name = CharField()\n task_description = CharField()\n task_finish_timestamp = DateTimeField(source=\"finish_time\")\n+ task_duration = SerializerMethodField()\n \n status = ChoiceField(\n source=\"result.status.name\",\n@@ -33,7 +40,11 @@\n )\n messages = ListField(source=\"result.messages\")\n \n- def to_representation(self, instance):\n+ def get_task_duration(self, instance: TaskInfo) -> int:\n+ \"\"\"Get the duration a task took to run\"\"\"\n+ return max(instance.finish_timestamp - instance.start_timestamp, 0)\n+\n+ def to_representation(self, instance: TaskInfo):\n \"\"\"When a new version of authentik adds fields to TaskInfo,\n the API will fail with an AttributeError, as the classes\n are pickled in cache. In that case, just delete the info\"\"\"\n", "issue": "System Tasks: Show start timestamp and calculate Duration\n**Is your feature request related to a problem? Please describe.**\r\nFor debugging purposes, I need the info when a task started, and when it finished. \r\n\r\n**Describe the solution you'd like**\r\nI have seen that the TaskInfo object actually holds that information, but it is not returned by the API, and not shown in the \"SystemTasks\" table of the web UI. 
\r\nIt would also make sense to calculate the duration for easier debugging.\r\n\r\n**Describe alternatives you've considered**\r\nI could look this up in the database, but this would be questionable UX, since there is already a view in the web app which should show this information.\r\n\r\n**Additional context**\r\n(none)\r\n\n", "before_files": [{"content": "\"\"\"Tasks API\"\"\"\nfrom importlib import import_module\n\nfrom django.contrib import messages\nfrom django.http.response import Http404\nfrom django.utils.translation import gettext_lazy as _\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import CharField, ChoiceField, DateTimeField, ListField\nfrom rest_framework.permissions import IsAdminUser\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.events.monitored_tasks import TaskInfo, TaskResultStatus\n\nLOGGER = get_logger()\n\n\nclass TaskSerializer(PassiveSerializer):\n \"\"\"Serialize TaskInfo and TaskResult\"\"\"\n\n task_name = CharField()\n task_description = CharField()\n task_finish_timestamp = DateTimeField(source=\"finish_time\")\n\n status = ChoiceField(\n source=\"result.status.name\",\n choices=[(x.name, x.name) for x in TaskResultStatus],\n )\n messages = ListField(source=\"result.messages\")\n\n def to_representation(self, instance):\n \"\"\"When a new version of authentik adds fields to TaskInfo,\n the API will fail with an AttributeError, as the classes\n are pickled in cache. 
In that case, just delete the info\"\"\"\n try:\n return super().to_representation(instance)\n except AttributeError: # pragma: no cover\n if isinstance(self.instance, list):\n for inst in self.instance:\n inst.delete()\n else:\n self.instance.delete()\n return {}\n\n\nclass TaskViewSet(ViewSet):\n \"\"\"Read-only view set that returns all background tasks\"\"\"\n\n permission_classes = [IsAdminUser]\n serializer_class = TaskSerializer\n\n @extend_schema(\n responses={\n 200: TaskSerializer(many=False),\n 404: OpenApiResponse(description=\"Task not found\"),\n },\n parameters=[\n OpenApiParameter(\n \"id\",\n type=OpenApiTypes.STR,\n location=OpenApiParameter.PATH,\n required=True,\n ),\n ],\n )\n # pylint: disable=invalid-name\n def retrieve(self, request: Request, pk=None) -> Response:\n \"\"\"Get a single system task\"\"\"\n task = TaskInfo.by_name(pk)\n if not task:\n raise Http404\n return Response(TaskSerializer(task, many=False).data)\n\n @extend_schema(responses={200: TaskSerializer(many=True)})\n def list(self, request: Request) -> Response:\n \"\"\"List system tasks\"\"\"\n tasks = sorted(TaskInfo.all().values(), key=lambda task: task.task_name)\n return Response(TaskSerializer(tasks, many=True).data)\n\n @extend_schema(\n request=OpenApiTypes.NONE,\n responses={\n 204: OpenApiResponse(description=\"Task retried successfully\"),\n 404: OpenApiResponse(description=\"Task not found\"),\n 500: OpenApiResponse(description=\"Failed to retry task\"),\n },\n parameters=[\n OpenApiParameter(\n \"id\",\n type=OpenApiTypes.STR,\n location=OpenApiParameter.PATH,\n required=True,\n ),\n ],\n )\n @action(detail=True, methods=[\"post\"])\n # pylint: disable=invalid-name\n def retry(self, request: Request, pk=None) -> Response:\n \"\"\"Retry task\"\"\"\n task = TaskInfo.by_name(pk)\n if not task:\n raise Http404\n try:\n task_module = import_module(task.task_call_module)\n task_func = getattr(task_module, task.task_call_func)\n LOGGER.debug(\"Running task\", task=task_func)\n task_func.delay(*task.task_call_args, **task.task_call_kwargs)\n messages.success(\n self.request,\n _(\"Successfully re-scheduled Task %(name)s!\" % {\"name\": task.task_name}),\n )\n return Response(status=204)\n except (ImportError, AttributeError): # pragma: no cover\n LOGGER.warning(\"Failed to run task, remove state\", task=task)\n # if we get an import error, the module path has probably changed\n task.delete()\n return Response(status=500)\n", "path": "authentik/admin/api/tasks.py"}], "after_files": [{"content": "\"\"\"Tasks API\"\"\"\nfrom importlib import import_module\n\nfrom django.contrib import messages\nfrom django.http.response import Http404\nfrom django.utils.translation import gettext_lazy as _\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import (\n CharField,\n ChoiceField,\n DateTimeField,\n ListField,\n SerializerMethodField,\n)\nfrom rest_framework.permissions import IsAdminUser\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.events.monitored_tasks import TaskInfo, TaskResultStatus\n\nLOGGER = get_logger()\n\n\nclass TaskSerializer(PassiveSerializer):\n \"\"\"Serialize TaskInfo and TaskResult\"\"\"\n\n task_name = CharField()\n task_description 
= CharField()\n task_finish_timestamp = DateTimeField(source=\"finish_time\")\n task_duration = SerializerMethodField()\n\n status = ChoiceField(\n source=\"result.status.name\",\n choices=[(x.name, x.name) for x in TaskResultStatus],\n )\n messages = ListField(source=\"result.messages\")\n\n def get_task_duration(self, instance: TaskInfo) -> int:\n \"\"\"Get the duration a task took to run\"\"\"\n return max(instance.finish_timestamp - instance.start_timestamp, 0)\n\n def to_representation(self, instance: TaskInfo):\n \"\"\"When a new version of authentik adds fields to TaskInfo,\n the API will fail with an AttributeError, as the classes\n are pickled in cache. In that case, just delete the info\"\"\"\n try:\n return super().to_representation(instance)\n except AttributeError: # pragma: no cover\n if isinstance(self.instance, list):\n for inst in self.instance:\n inst.delete()\n else:\n self.instance.delete()\n return {}\n\n\nclass TaskViewSet(ViewSet):\n \"\"\"Read-only view set that returns all background tasks\"\"\"\n\n permission_classes = [IsAdminUser]\n serializer_class = TaskSerializer\n\n @extend_schema(\n responses={\n 200: TaskSerializer(many=False),\n 404: OpenApiResponse(description=\"Task not found\"),\n },\n parameters=[\n OpenApiParameter(\n \"id\",\n type=OpenApiTypes.STR,\n location=OpenApiParameter.PATH,\n required=True,\n ),\n ],\n )\n # pylint: disable=invalid-name\n def retrieve(self, request: Request, pk=None) -> Response:\n \"\"\"Get a single system task\"\"\"\n task = TaskInfo.by_name(pk)\n if not task:\n raise Http404\n return Response(TaskSerializer(task, many=False).data)\n\n @extend_schema(responses={200: TaskSerializer(many=True)})\n def list(self, request: Request) -> Response:\n \"\"\"List system tasks\"\"\"\n tasks = sorted(TaskInfo.all().values(), key=lambda task: task.task_name)\n return Response(TaskSerializer(tasks, many=True).data)\n\n @extend_schema(\n request=OpenApiTypes.NONE,\n responses={\n 204: OpenApiResponse(description=\"Task retried successfully\"),\n 404: OpenApiResponse(description=\"Task not found\"),\n 500: OpenApiResponse(description=\"Failed to retry task\"),\n },\n parameters=[\n OpenApiParameter(\n \"id\",\n type=OpenApiTypes.STR,\n location=OpenApiParameter.PATH,\n required=True,\n ),\n ],\n )\n @action(detail=True, methods=[\"post\"])\n # pylint: disable=invalid-name\n def retry(self, request: Request, pk=None) -> Response:\n \"\"\"Retry task\"\"\"\n task = TaskInfo.by_name(pk)\n if not task:\n raise Http404\n try:\n task_module = import_module(task.task_call_module)\n task_func = getattr(task_module, task.task_call_func)\n LOGGER.debug(\"Running task\", task=task_func)\n task_func.delay(*task.task_call_args, **task.task_call_kwargs)\n messages.success(\n self.request,\n _(\"Successfully re-scheduled Task %(name)s!\" % {\"name\": task.task_name}),\n )\n return Response(status=204)\n except (ImportError, AttributeError): # pragma: no cover\n LOGGER.warning(\"Failed to run task, remove state\", task=task)\n # if we get an import error, the module path has probably changed\n task.delete()\n return Response(status=500)\n", "path": "authentik/admin/api/tasks.py"}]} | 1,594 | 360 |
gh_patches_debug_31542 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
download-data fails with difficult to interpret messages in certain cases
1)
```
(.env) user@nuc:~/freqtrade-wrk/github-hroff-1902/freqtrade$ freqtrade download-data
2019-09-06 01:00:01,335 - freqtrade.loggers - INFO - Verbosity set to 0
2019-09-06 01:00:01,336 - freqtrade.configuration.configuration - INFO - Dry run is enabled
2019-09-06 01:00:01,336 - freqtrade.configuration.configuration - INFO - Using DB: "sqlite://"
2019-09-06 01:00:01,336 - freqtrade.configuration.configuration - INFO - Using max_open_trades: None ...
2019-09-06 01:00:01,336 - freqtrade.configuration.configuration - INFO - Using user-data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data ...
2019-09-06 01:00:01,337 - freqtrade.configuration.configuration - INFO - Using data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data ...
2019-09-06 01:00:01,337 - freqtrade.configuration.configuration - INFO - timeframes --timeframes: ['1m', '5m']
2019-09-06 01:00:01,337 - freqtrade.configuration.check_exchange - INFO - Checking exchange...
2019-09-06 01:00:01,337 - freqtrade - ERROR - Exchange "" is not supported by ccxt and therefore not available for the bot.
The following exchanges are supported by ccxt: _1btcxe, acx, allcoin, anxpro, bcex, bequant, bibox, bigone, binance, binanceje, bit2c, bitbank, bitbay, bitfinex, bitfinex2, bitflyer, bitforex, bithumb, bitkk, bitlish, bitmart, bitmex, bitso, bitstamp, bitstamp1, bittrex, bitz, bl3p, bleutrade, braziliex, btcalpha, btcbox, btcchina, btcmarkets, btctradeim, btctradeua, btcturk, buda, bxinth, cex, chilebit, cobinhood, coinbase, coinbaseprime, coinbasepro, coincheck, coinegg, coinex, coinexchange, coinfalcon, coinfloor, coingi, coinmarketcap, coinmate, coinone, coinspot, cointiger, coolcoin, coss, crex24, crypton, deribit, digifinex, dsx, dx, ethfinex, exmo, exx, fcoin, fcoinjp, flowbtc, foxbit, fybse, gateio, gdax, gemini, hitbtc, hitbtc2, huobipro, huobiru, ice3x, independentreserve, indodax, itbit, kkex, kraken, kucoin, kucoin2, kuna, lakebtc, latoken, lbank, liquid, livecoin, luno, lykke, mandala, mercado, mixcoins, negociecoins, nova, oceanex, okcoincny, okcoinusd, okex, okex3, paymium, poloniex, rightbtc, southxchange, stronghold, surbitcoin, theocean, therock, tidebit, tidex, upbit, vaultoro, vbtc, virwox, xbtce, yobit, zaif, zb
```
-- Maybe it's covered by #2217 . If not, the message should not include `Exchange ""` but should be something like `Exchange is not specified.`
2)
```
(.env) user@nuc:~/freqtrade-wrk/github-hroff-1902/freqtrade$ freqtrade download-data --exchange bittrex
2019-09-06 01:00:15,176 - freqtrade.loggers - INFO - Verbosity set to 0
2019-09-06 01:00:15,177 - freqtrade.configuration.configuration - INFO - Dry run is enabled
2019-09-06 01:00:15,177 - freqtrade.configuration.configuration - INFO - Using DB: "sqlite://"
2019-09-06 01:00:15,177 - freqtrade.configuration.configuration - INFO - Using max_open_trades: None ...
2019-09-06 01:00:15,177 - freqtrade.configuration.configuration - INFO - Using exchange bittrex
2019-09-06 01:00:15,178 - freqtrade.configuration.configuration - INFO - Using user-data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data ...
2019-09-06 01:00:15,178 - freqtrade.configuration.configuration - INFO - Using data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data/bittrex ...
2019-09-06 01:00:15,178 - freqtrade.configuration.configuration - INFO - timeframes --timeframes: ['1m', '5m']
2019-09-06 01:00:15,178 - freqtrade.configuration.check_exchange - INFO - Checking exchange...
2019-09-06 01:00:15,178 - freqtrade.configuration.check_exchange - INFO - Exchange "bittrex" is officially supported by the Freqtrade development team.
2019-09-06 01:00:15,179 - freqtrade - ERROR - Fatal exception!
Traceback (most recent call last):
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py", line 40, in main
args.func(args)
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/utils.py", line 75, in start_download_data
logger.info(f'About to download pairs: {config["pairs"]}, '
KeyError: 'pairs'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/configuration/check_exchange.py`
Content:
```
1 import logging
2 from typing import Any, Dict
3
4 from freqtrade import OperationalException
5 from freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,
6 is_exchange_available, is_exchange_bad,
7 is_exchange_officially_supported)
8 from freqtrade.state import RunMode
9
10 logger = logging.getLogger(__name__)
11
12
13 def check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:
14 """
15 Check if the exchange name in the config file is supported by Freqtrade
16 :param check_for_bad: if True, check the exchange against the list of known 'bad'
17 exchanges
18 :return: False if exchange is 'bad', i.e. is known to work with the bot with
19 critical issues or does not work at all, crashes, etc. True otherwise.
20 raises an exception if the exchange if not supported by ccxt
21 and thus is not known for the Freqtrade at all.
22 """
23
24 if config['runmode'] in [RunMode.PLOT] and not config.get('exchange', {}).get('name'):
25 # Skip checking exchange in plot mode, since it requires no exchange
26 return True
27 logger.info("Checking exchange...")
28
29 exchange = config.get('exchange', {}).get('name').lower()
30 if not is_exchange_available(exchange):
31 raise OperationalException(
32 f'Exchange "{exchange}" is not supported by ccxt '
33 f'and therefore not available for the bot.\n'
34 f'The following exchanges are supported by ccxt: '
35 f'{", ".join(available_exchanges())}'
36 )
37
38 if check_for_bad and is_exchange_bad(exchange):
39 raise OperationalException(f'Exchange "{exchange}" is known to not work with the bot yet. '
40 f'Reason: {get_exchange_bad_reason(exchange)}')
41
42 if is_exchange_officially_supported(exchange):
43 logger.info(f'Exchange "{exchange}" is officially supported '
44 f'by the Freqtrade development team.')
45 else:
46 logger.warning(f'Exchange "{exchange}" is supported by ccxt '
47 f'and therefore available for the bot but not officially supported '
48 f'by the Freqtrade development team. '
49 f'It may work flawlessly (please report back) or have serious issues. '
50 f'Use it at your own discretion.')
51
52 return True
53
```
Path: `freqtrade/utils.py`
Content:
```
1 import logging
2 import sys
3 from pathlib import Path
4 from typing import Any, Dict, List
5
6 import arrow
7
8 from freqtrade.configuration import Configuration, TimeRange
9 from freqtrade.configuration.directory_operations import create_userdata_dir
10 from freqtrade.data.history import refresh_backtest_ohlcv_data
11 from freqtrade.exchange import available_exchanges
12 from freqtrade.resolvers import ExchangeResolver
13 from freqtrade.state import RunMode
14
15 logger = logging.getLogger(__name__)
16
17
18 def setup_utils_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:
19 """
20 Prepare the configuration for utils subcommands
21 :param args: Cli args from Arguments()
22 :return: Configuration
23 """
24 configuration = Configuration(args, method)
25 config = configuration.get_config()
26
27 config['exchange']['dry_run'] = True
28 # Ensure we do not use Exchange credentials
29 config['exchange']['key'] = ''
30 config['exchange']['secret'] = ''
31
32 return config
33
34
35 def start_list_exchanges(args: Dict[str, Any]) -> None:
36 """
37 Print available exchanges
38 :param args: Cli args from Arguments()
39 :return: None
40 """
41
42 if args['print_one_column']:
43 print('\n'.join(available_exchanges()))
44 else:
45 print(f"Exchanges supported by ccxt and available for Freqtrade: "
46 f"{', '.join(available_exchanges())}")
47
48
49 def start_create_userdir(args: Dict[str, Any]) -> None:
50 """
51 Create "user_data" directory to contain user data strategies, hyperopts, ...)
52 :param args: Cli args from Arguments()
53 :return: None
54 """
55 if "user_data_dir" in args and args["user_data_dir"]:
56 create_userdata_dir(args["user_data_dir"], create_dir=True)
57 else:
58 logger.warning("`create-userdir` requires --userdir to be set.")
59 sys.exit(1)
60
61
62 def start_download_data(args: Dict[str, Any]) -> None:
63 """
64 Download data (former download_backtest_data.py script)
65 """
66 config = setup_utils_configuration(args, RunMode.OTHER)
67
68 timerange = TimeRange()
69 if 'days' in config:
70 time_since = arrow.utcnow().shift(days=-config['days']).strftime("%Y%m%d")
71 timerange = TimeRange.parse_timerange(f'{time_since}-')
72
73 dl_path = Path(config['datadir'])
74 logger.info(f'About to download pairs: {config["pairs"]}, '
75 f'intervals: {config["timeframes"]} to {dl_path}')
76
77 pairs_not_available: List[str] = []
78
79 try:
80 # Init exchange
81 exchange = ExchangeResolver(config['exchange']['name'], config).exchange
82
83 pairs_not_available = refresh_backtest_ohlcv_data(
84 exchange, pairs=config["pairs"], timeframes=config["timeframes"],
85 dl_path=Path(config['datadir']), timerange=timerange, erase=config.get("erase"))
86
87 except KeyboardInterrupt:
88 sys.exit("SIGINT received, aborting ...")
89
90 finally:
91 if pairs_not_available:
92 logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
93 f"on exchange {config['exchange']['name']}.")
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/configuration/check_exchange.py b/freqtrade/configuration/check_exchange.py
--- a/freqtrade/configuration/check_exchange.py
+++ b/freqtrade/configuration/check_exchange.py
@@ -27,6 +27,14 @@
logger.info("Checking exchange...")
exchange = config.get('exchange', {}).get('name').lower()
+ if not exchange:
+ raise OperationalException(
+ f'This command requires a configured exchange. You should either use '
+ f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\n'
+ f'The following exchanges are supported by ccxt: '
+ f'{", ".join(available_exchanges())}'
+ )
+
if not is_exchange_available(exchange):
raise OperationalException(
f'Exchange "{exchange}" is not supported by ccxt '
diff --git a/freqtrade/utils.py b/freqtrade/utils.py
--- a/freqtrade/utils.py
+++ b/freqtrade/utils.py
@@ -5,6 +5,7 @@
import arrow
+from freqtrade import OperationalException
from freqtrade.configuration import Configuration, TimeRange
from freqtrade.configuration.directory_operations import create_userdata_dir
from freqtrade.data.history import refresh_backtest_ohlcv_data
@@ -70,6 +71,11 @@
time_since = arrow.utcnow().shift(days=-config['days']).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
+ if 'pairs' not in config:
+ raise OperationalException(
+ "Downloading data requires a list of pairs. "
+ "Please check the documentation on how to configure this.")
+
dl_path = Path(config['datadir'])
logger.info(f'About to download pairs: {config["pairs"]}, '
f'intervals: {config["timeframes"]} to {dl_path}')
| {"golden_diff": "diff --git a/freqtrade/configuration/check_exchange.py b/freqtrade/configuration/check_exchange.py\n--- a/freqtrade/configuration/check_exchange.py\n+++ b/freqtrade/configuration/check_exchange.py\n@@ -27,6 +27,14 @@\n logger.info(\"Checking exchange...\")\n \n exchange = config.get('exchange', {}).get('name').lower()\n+ if not exchange:\n+ raise OperationalException(\n+ f'This command requires a configured exchange. You should either use '\n+ f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\\n'\n+ f'The following exchanges are supported by ccxt: '\n+ f'{\", \".join(available_exchanges())}'\n+ )\n+\n if not is_exchange_available(exchange):\n raise OperationalException(\n f'Exchange \"{exchange}\" is not supported by ccxt '\ndiff --git a/freqtrade/utils.py b/freqtrade/utils.py\n--- a/freqtrade/utils.py\n+++ b/freqtrade/utils.py\n@@ -5,6 +5,7 @@\n \n import arrow\n \n+from freqtrade import OperationalException\n from freqtrade.configuration import Configuration, TimeRange\n from freqtrade.configuration.directory_operations import create_userdata_dir\n from freqtrade.data.history import refresh_backtest_ohlcv_data\n@@ -70,6 +71,11 @@\n time_since = arrow.utcnow().shift(days=-config['days']).strftime(\"%Y%m%d\")\n timerange = TimeRange.parse_timerange(f'{time_since}-')\n \n+ if 'pairs' not in config:\n+ raise OperationalException(\n+ \"Downloading data requires a list of pairs. \"\n+ \"Please check the documentation on how to configure this.\")\n+\n dl_path = Path(config['datadir'])\n logger.info(f'About to download pairs: {config[\"pairs\"]}, '\n f'intervals: {config[\"timeframes\"]} to {dl_path}')\n", "issue": "download-data fails with difficult to interpret messages in certain cases\n1)\r\n```\r\n(.env) user@nuc:~/freqtrade-wrk/github-hroff-1902/freqtrade$ freqtrade download-data\r\n2019-09-06 01:00:01,335 - freqtrade.loggers - INFO - Verbosity set to 0\r\n2019-09-06 01:00:01,336 - freqtrade.configuration.configuration - INFO - Dry run is enabled\r\n2019-09-06 01:00:01,336 - freqtrade.configuration.configuration - INFO - Using DB: \"sqlite://\"\r\n2019-09-06 01:00:01,336 - freqtrade.configuration.configuration - INFO - Using max_open_trades: None ...\r\n2019-09-06 01:00:01,336 - freqtrade.configuration.configuration - INFO - Using user-data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data ...\r\n2019-09-06 01:00:01,337 - freqtrade.configuration.configuration - INFO - Using data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data ...\r\n2019-09-06 01:00:01,337 - freqtrade.configuration.configuration - INFO - timeframes --timeframes: ['1m', '5m']\r\n2019-09-06 01:00:01,337 - freqtrade.configuration.check_exchange - INFO - Checking exchange...\r\n2019-09-06 01:00:01,337 - freqtrade - ERROR - Exchange \"\" is not supported by ccxt and therefore not available for the bot.\r\nThe following exchanges are supported by ccxt: _1btcxe, acx, allcoin, anxpro, bcex, bequant, bibox, bigone, binance, binanceje, bit2c, bitbank, bitbay, bitfinex, bitfinex2, bitflyer, bitforex, bithumb, bitkk, bitlish, bitmart, bitmex, bitso, bitstamp, bitstamp1, bittrex, bitz, bl3p, bleutrade, braziliex, btcalpha, btcbox, btcchina, btcmarkets, btctradeim, btctradeua, btcturk, buda, bxinth, cex, chilebit, cobinhood, coinbase, coinbaseprime, coinbasepro, coincheck, coinegg, coinex, coinexchange, coinfalcon, coinfloor, coingi, coinmarketcap, coinmate, coinone, coinspot, cointiger, coolcoin, coss, crex24, crypton, deribit, 
digifinex, dsx, dx, ethfinex, exmo, exx, fcoin, fcoinjp, flowbtc, foxbit, fybse, gateio, gdax, gemini, hitbtc, hitbtc2, huobipro, huobiru, ice3x, independentreserve, indodax, itbit, kkex, kraken, kucoin, kucoin2, kuna, lakebtc, latoken, lbank, liquid, livecoin, luno, lykke, mandala, mercado, mixcoins, negociecoins, nova, oceanex, okcoincny, okcoinusd, okex, okex3, paymium, poloniex, rightbtc, southxchange, stronghold, surbitcoin, theocean, therock, tidebit, tidex, upbit, vaultoro, vbtc, virwox, xbtce, yobit, zaif, zb\r\n```\r\n-- Maybe it's covered by #2217 . If not, the message should not include `Exchange \"\"` but should be something like `Exchange is not specified.`\r\n\r\n2) \r\n```\r\n(.env) user@nuc:~/freqtrade-wrk/github-hroff-1902/freqtrade$ freqtrade download-data --exchange bittrex\r\n2019-09-06 01:00:15,176 - freqtrade.loggers - INFO - Verbosity set to 0\r\n2019-09-06 01:00:15,177 - freqtrade.configuration.configuration - INFO - Dry run is enabled\r\n2019-09-06 01:00:15,177 - freqtrade.configuration.configuration - INFO - Using DB: \"sqlite://\"\r\n2019-09-06 01:00:15,177 - freqtrade.configuration.configuration - INFO - Using max_open_trades: None ...\r\n2019-09-06 01:00:15,177 - freqtrade.configuration.configuration - INFO - Using exchange bittrex\r\n2019-09-06 01:00:15,178 - freqtrade.configuration.configuration - INFO - Using user-data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data ...\r\n2019-09-06 01:00:15,178 - freqtrade.configuration.configuration - INFO - Using data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data/bittrex ...\r\n2019-09-06 01:00:15,178 - freqtrade.configuration.configuration - INFO - timeframes --timeframes: ['1m', '5m']\r\n2019-09-06 01:00:15,178 - freqtrade.configuration.check_exchange - INFO - Checking exchange...\r\n2019-09-06 01:00:15,178 - freqtrade.configuration.check_exchange - INFO - Exchange \"bittrex\" is officially supported by the Freqtrade development team.\r\n2019-09-06 01:00:15,179 - freqtrade - ERROR - Fatal exception!\r\nTraceback (most recent call last):\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py\", line 40, in main\r\n args.func(args)\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/utils.py\", line 75, in start_download_data\r\n logger.info(f'About to download pairs: {config[\"pairs\"]}, '\r\nKeyError: 'pairs'\r\n```\r\n\n", "before_files": [{"content": "import logging\nfrom typing import Any, Dict\n\nfrom freqtrade import OperationalException\nfrom freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,\n is_exchange_available, is_exchange_bad,\n is_exchange_officially_supported)\nfrom freqtrade.state import RunMode\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:\n \"\"\"\n Check if the exchange name in the config file is supported by Freqtrade\n :param check_for_bad: if True, check the exchange against the list of known 'bad'\n exchanges\n :return: False if exchange is 'bad', i.e. is known to work with the bot with\n critical issues or does not work at all, crashes, etc. 
True otherwise.\n raises an exception if the exchange if not supported by ccxt\n and thus is not known for the Freqtrade at all.\n \"\"\"\n\n if config['runmode'] in [RunMode.PLOT] and not config.get('exchange', {}).get('name'):\n # Skip checking exchange in plot mode, since it requires no exchange\n return True\n logger.info(\"Checking exchange...\")\n\n exchange = config.get('exchange', {}).get('name').lower()\n if not is_exchange_available(exchange):\n raise OperationalException(\n f'Exchange \"{exchange}\" is not supported by ccxt '\n f'and therefore not available for the bot.\\n'\n f'The following exchanges are supported by ccxt: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if check_for_bad and is_exchange_bad(exchange):\n raise OperationalException(f'Exchange \"{exchange}\" is known to not work with the bot yet. '\n f'Reason: {get_exchange_bad_reason(exchange)}')\n\n if is_exchange_officially_supported(exchange):\n logger.info(f'Exchange \"{exchange}\" is officially supported '\n f'by the Freqtrade development team.')\n else:\n logger.warning(f'Exchange \"{exchange}\" is supported by ccxt '\n f'and therefore available for the bot but not officially supported '\n f'by the Freqtrade development team. '\n f'It may work flawlessly (please report back) or have serious issues. '\n f'Use it at your own discretion.')\n\n return True\n", "path": "freqtrade/configuration/check_exchange.py"}, {"content": "import logging\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nimport arrow\n\nfrom freqtrade.configuration import Configuration, TimeRange\nfrom freqtrade.configuration.directory_operations import create_userdata_dir\nfrom freqtrade.data.history import refresh_backtest_ohlcv_data\nfrom freqtrade.exchange import available_exchanges\nfrom freqtrade.resolvers import ExchangeResolver\nfrom freqtrade.state import RunMode\n\nlogger = logging.getLogger(__name__)\n\n\ndef setup_utils_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:\n \"\"\"\n Prepare the configuration for utils subcommands\n :param args: Cli args from Arguments()\n :return: Configuration\n \"\"\"\n configuration = Configuration(args, method)\n config = configuration.get_config()\n\n config['exchange']['dry_run'] = True\n # Ensure we do not use Exchange credentials\n config['exchange']['key'] = ''\n config['exchange']['secret'] = ''\n\n return config\n\n\ndef start_list_exchanges(args: Dict[str, Any]) -> None:\n \"\"\"\n Print available exchanges\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n\n if args['print_one_column']:\n print('\\n'.join(available_exchanges()))\n else:\n print(f\"Exchanges supported by ccxt and available for Freqtrade: \"\n f\"{', '.join(available_exchanges())}\")\n\n\ndef start_create_userdir(args: Dict[str, Any]) -> None:\n \"\"\"\n Create \"user_data\" directory to contain user data strategies, hyperopts, ...)\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n if \"user_data_dir\" in args and args[\"user_data_dir\"]:\n create_userdata_dir(args[\"user_data_dir\"], create_dir=True)\n else:\n logger.warning(\"`create-userdir` requires --userdir to be set.\")\n sys.exit(1)\n\n\ndef start_download_data(args: Dict[str, Any]) -> None:\n \"\"\"\n Download data (former download_backtest_data.py script)\n \"\"\"\n config = setup_utils_configuration(args, RunMode.OTHER)\n\n timerange = TimeRange()\n if 'days' in config:\n time_since = arrow.utcnow().shift(days=-config['days']).strftime(\"%Y%m%d\")\n timerange = 
TimeRange.parse_timerange(f'{time_since}-')\n\n dl_path = Path(config['datadir'])\n logger.info(f'About to download pairs: {config[\"pairs\"]}, '\n f'intervals: {config[\"timeframes\"]} to {dl_path}')\n\n pairs_not_available: List[str] = []\n\n try:\n # Init exchange\n exchange = ExchangeResolver(config['exchange']['name'], config).exchange\n\n pairs_not_available = refresh_backtest_ohlcv_data(\n exchange, pairs=config[\"pairs\"], timeframes=config[\"timeframes\"],\n dl_path=Path(config['datadir']), timerange=timerange, erase=config.get(\"erase\"))\n\n except KeyboardInterrupt:\n sys.exit(\"SIGINT received, aborting ...\")\n\n finally:\n if pairs_not_available:\n logger.info(f\"Pairs [{','.join(pairs_not_available)}] not available \"\n f\"on exchange {config['exchange']['name']}.\")\n", "path": "freqtrade/utils.py"}], "after_files": [{"content": "import logging\nfrom typing import Any, Dict\n\nfrom freqtrade import OperationalException\nfrom freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,\n is_exchange_available, is_exchange_bad,\n is_exchange_officially_supported)\nfrom freqtrade.state import RunMode\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:\n \"\"\"\n Check if the exchange name in the config file is supported by Freqtrade\n :param check_for_bad: if True, check the exchange against the list of known 'bad'\n exchanges\n :return: False if exchange is 'bad', i.e. is known to work with the bot with\n critical issues or does not work at all, crashes, etc. True otherwise.\n raises an exception if the exchange if not supported by ccxt\n and thus is not known for the Freqtrade at all.\n \"\"\"\n\n if config['runmode'] in [RunMode.PLOT] and not config.get('exchange', {}).get('name'):\n # Skip checking exchange in plot mode, since it requires no exchange\n return True\n logger.info(\"Checking exchange...\")\n\n exchange = config.get('exchange', {}).get('name').lower()\n if not exchange:\n raise OperationalException(\n f'This command requires a configured exchange. You should either use '\n f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\\n'\n f'The following exchanges are supported by ccxt: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if not is_exchange_available(exchange):\n raise OperationalException(\n f'Exchange \"{exchange}\" is not supported by ccxt '\n f'and therefore not available for the bot.\\n'\n f'The following exchanges are supported by ccxt: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if check_for_bad and is_exchange_bad(exchange):\n raise OperationalException(f'Exchange \"{exchange}\" is known to not work with the bot yet. '\n f'Reason: {get_exchange_bad_reason(exchange)}')\n\n if is_exchange_officially_supported(exchange):\n logger.info(f'Exchange \"{exchange}\" is officially supported '\n f'by the Freqtrade development team.')\n else:\n logger.warning(f'Exchange \"{exchange}\" is supported by ccxt '\n f'and therefore available for the bot but not officially supported '\n f'by the Freqtrade development team. '\n f'It may work flawlessly (please report back) or have serious issues. 
'\n f'Use it at your own discretion.')\n\n return True\n", "path": "freqtrade/configuration/check_exchange.py"}, {"content": "import logging\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nimport arrow\n\nfrom freqtrade import OperationalException\nfrom freqtrade.configuration import Configuration, TimeRange\nfrom freqtrade.configuration.directory_operations import create_userdata_dir\nfrom freqtrade.data.history import refresh_backtest_ohlcv_data\nfrom freqtrade.exchange import available_exchanges\nfrom freqtrade.resolvers import ExchangeResolver\nfrom freqtrade.state import RunMode\n\nlogger = logging.getLogger(__name__)\n\n\ndef setup_utils_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:\n \"\"\"\n Prepare the configuration for utils subcommands\n :param args: Cli args from Arguments()\n :return: Configuration\n \"\"\"\n configuration = Configuration(args, method)\n config = configuration.get_config()\n\n config['exchange']['dry_run'] = True\n # Ensure we do not use Exchange credentials\n config['exchange']['key'] = ''\n config['exchange']['secret'] = ''\n\n return config\n\n\ndef start_list_exchanges(args: Dict[str, Any]) -> None:\n \"\"\"\n Print available exchanges\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n\n if args['print_one_column']:\n print('\\n'.join(available_exchanges()))\n else:\n print(f\"Exchanges supported by ccxt and available for Freqtrade: \"\n f\"{', '.join(available_exchanges())}\")\n\n\ndef start_create_userdir(args: Dict[str, Any]) -> None:\n \"\"\"\n Create \"user_data\" directory to contain user data strategies, hyperopts, ...)\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n if \"user_data_dir\" in args and args[\"user_data_dir\"]:\n create_userdata_dir(args[\"user_data_dir\"], create_dir=True)\n else:\n logger.warning(\"`create-userdir` requires --userdir to be set.\")\n sys.exit(1)\n\n\ndef start_download_data(args: Dict[str, Any]) -> None:\n \"\"\"\n Download data (former download_backtest_data.py script)\n \"\"\"\n config = setup_utils_configuration(args, RunMode.OTHER)\n\n timerange = TimeRange()\n if 'days' in config:\n time_since = arrow.utcnow().shift(days=-config['days']).strftime(\"%Y%m%d\")\n timerange = TimeRange.parse_timerange(f'{time_since}-')\n\n if 'pairs' not in config:\n raise OperationalException(\n \"Downloading data requires a list of pairs. \"\n \"Please check the documentation on how to configure this.\")\n\n dl_path = Path(config['datadir'])\n logger.info(f'About to download pairs: {config[\"pairs\"]}, '\n f'intervals: {config[\"timeframes\"]} to {dl_path}')\n\n pairs_not_available: List[str] = []\n\n try:\n # Init exchange\n exchange = ExchangeResolver(config['exchange']['name'], config).exchange\n\n pairs_not_available = refresh_backtest_ohlcv_data(\n exchange, pairs=config[\"pairs\"], timeframes=config[\"timeframes\"],\n dl_path=Path(config['datadir']), timerange=timerange, erase=config.get(\"erase\"))\n\n except KeyboardInterrupt:\n sys.exit(\"SIGINT received, aborting ...\")\n\n finally:\n if pairs_not_available:\n logger.info(f\"Pairs [{','.join(pairs_not_available)}] not available \"\n f\"on exchange {config['exchange']['name']}.\")\n", "path": "freqtrade/utils.py"}]} | 3,287 | 410 |
gh_patches_debug_11756 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1673 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[META 659] Backend dependencies granularity for NoSQL and Messaging
See meta issue for the description and details:
- Meta issue: https://github.com/elastic/apm/issues/659
This is done for everything except for Elasticsearch.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/elasticsearch.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 import re
34 from typing import Optional
35 from urllib.parse import parse_qs, urlparse
36
37 import elasticapm
38 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
39 from elasticapm.traces import DroppedSpan, execution_context
40 from elasticapm.utils.logging import get_logger
41
42 logger = get_logger("elasticapm.instrument")
43
44 should_capture_body_re = re.compile("/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)")
45
46
47 class ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):
48 name = "elasticsearch_connection"
49
50 def get_instrument_list(self):
51 try:
52 import elastic_transport # noqa: F401
53
54 return [
55 ("elastic_transport._node._http_urllib3", "Urllib3HttpNode.perform_request"),
56 ("elastic_transport._node._http_requests", "RequestsHttpNode.perform_request"),
57 ]
58 except ImportError:
59 return [
60 ("elasticsearch.connection.http_urllib3", "Urllib3HttpConnection.perform_request"),
61 ("elasticsearch.connection.http_requests", "RequestsHttpConnection.perform_request"),
62 ]
63
64 def call(self, module, method, wrapped, instance, args, kwargs):
65 span = execution_context.get_span()
66 if not span or isinstance(span, DroppedSpan):
67 return wrapped(*args, **kwargs)
68
69 self._update_context_by_request_data(span.context, instance, args, kwargs)
70
71 result = wrapped(*args, **kwargs)
72 if hasattr(result, "meta"): # elasticsearch-py 8.x+
73 status_code = result.meta.status
74 else:
75 status_code = result[0]
76 span.context["http"] = {"status_code": status_code}
77
78 return result
79
80 def _update_context_by_request_data(self, context, instance, args, kwargs):
81 args_len = len(args)
82 url = args[1] if args_len > 1 else kwargs.get("url")
83 params = args[2] if args_len > 2 else kwargs.get("params")
84 body_serialized = args[3] if args_len > 3 else kwargs.get("body")
85
86 if "?" in url and not params:
87 url, qs = url.split("?", 1)
88 params = {k: v[0] for k, v in parse_qs(qs).items()}
89
90 should_capture_body = bool(should_capture_body_re.search(url))
91
92 context["db"] = {"type": "elasticsearch"}
93 if should_capture_body:
94 query = []
95 # using both q AND body is allowed in some API endpoints / ES versions,
96 # but not in others. We simply capture both if they are there so the
97 # user can see it.
98 if params and "q" in params:
99 # 'q' may already be encoded to a byte string at this point.
100 # We assume utf8, which is the default
101 q = params["q"]
102 if isinstance(q, bytes):
103 q = q.decode("utf-8", errors="replace")
104 query.append("q=" + q)
105 if body_serialized:
106 if isinstance(body_serialized, bytes):
107 query.append(body_serialized.decode("utf-8", errors="replace"))
108 else:
109 query.append(body_serialized)
110 if query:
111 context["db"]["statement"] = "\n\n".join(query)
112
113 # ES5: `host` is URL, no `port` attribute
114 # ES6, ES7: `host` URL, `hostname` is host, `port` is port
115 # ES8: `host` is hostname, no `hostname` attribute, `port` is `port`
116 if not hasattr(instance, "port"):
117 # ES5, parse hostname and port from URL stored in `host`
118 parsed_url = urlparse(instance.host)
119 host = parsed_url.hostname
120 port = parsed_url.port
121 elif not hasattr(instance, "hostname"):
122 # ES8 (and up, one can hope)
123 host = instance.host
124 port = instance.port
125 else:
126 # ES6, ES7
127 host = instance.hostname
128 port = instance.port
129
130 context["destination"] = {"address": host, "port": port}
131
132
133 class ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):
134 name = "elasticsearch_connection"
135
136 def get_instrument_list(self):
137 try:
138 import elastic_transport # noqa: F401
139
140 return [
141 ("elastic_transport", "Transport.perform_request"),
142 ]
143 except ImportError:
144 return [
145 ("elasticsearch.transport", "Transport.perform_request"),
146 ]
147
148 def call(self, module, method, wrapped, instance, args, kwargs):
149 with elasticapm.capture_span(
150 self._get_signature(args, kwargs),
151 span_type="db",
152 span_subtype="elasticsearch",
153 span_action="query",
154 extra={},
155 skip_frames=2,
156 leaf=True,
157 ) as span:
158 result_data = wrapped(*args, **kwargs)
159
160 hits = self._get_hits(result_data)
161 if hits:
162 span.context["db"]["rows_affected"] = hits
163
164 return result_data
165
166 def _get_signature(self, args, kwargs):
167 args_len = len(args)
168 http_method = args[0] if args_len else kwargs.get("method")
169 http_path = args[1] if args_len > 1 else kwargs.get("url")
170 http_path = http_path.split("?", 1)[0] # we don't want to capture a potential query string in the span name
171
172 return "ES %s %s" % (http_method, http_path)
173
174 def _get_hits(self, result) -> Optional[int]:
175 if getattr(result, "body", None) and "hits" in result.body: # ES >= 8
176 return result.body["hits"]["total"]["value"]
177 elif isinstance(result, dict) and "hits" in result:
178 return (
179 result["hits"]["total"]["value"]
180 if isinstance(result["hits"]["total"], dict)
181 else result["hits"]["total"]
182 )
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py
--- a/elasticapm/instrumentation/packages/elasticsearch.py
+++ b/elasticapm/instrumentation/packages/elasticsearch.py
@@ -71,9 +71,13 @@
result = wrapped(*args, **kwargs)
if hasattr(result, "meta"): # elasticsearch-py 8.x+
status_code = result.meta.status
+ cluster = result.meta.headers.get("x-found-handling-cluster")
else:
status_code = result[0]
+ cluster = result[1].get("x-found-handling-cluster")
span.context["http"] = {"status_code": status_code}
+ if cluster:
+ span.context["db"] = {"instance": cluster}
return result
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py\n--- a/elasticapm/instrumentation/packages/elasticsearch.py\n+++ b/elasticapm/instrumentation/packages/elasticsearch.py\n@@ -71,9 +71,13 @@\n result = wrapped(*args, **kwargs)\n if hasattr(result, \"meta\"): # elasticsearch-py 8.x+\n status_code = result.meta.status\n+ cluster = result.meta.headers.get(\"x-found-handling-cluster\")\n else:\n status_code = result[0]\n+ cluster = result[1].get(\"x-found-handling-cluster\")\n span.context[\"http\"] = {\"status_code\": status_code}\n+ if cluster:\n+ span.context[\"db\"] = {\"instance\": cluster}\n \n return result\n", "issue": "[META 659] Backend dependencies granularity for NoSQL and Messaging\nSee meta issue for the description and details:\r\n- Meta issue: https://github.com/elastic/apm/issues/659\r\n\r\nThis is done for everything except for Elasticsearch.\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nimport re\nfrom typing import Optional\nfrom urllib.parse import parse_qs, urlparse\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, execution_context\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nshould_capture_body_re = re.compile(\"/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)\")\n\n\nclass ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport._node._http_urllib3\", \"Urllib3HttpNode.perform_request\"),\n (\"elastic_transport._node._http_requests\", \"RequestsHttpNode.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch.connection.http_urllib3\", \"Urllib3HttpConnection.perform_request\"),\n (\"elasticsearch.connection.http_requests\", \"RequestsHttpConnection.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if not span or isinstance(span, DroppedSpan):\n return wrapped(*args, **kwargs)\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n result = wrapped(*args, **kwargs)\n if hasattr(result, \"meta\"): # elasticsearch-py 8.x+\n status_code = result.meta.status\n else:\n status_code = result[0]\n span.context[\"http\"] = {\"status_code\": status_code}\n\n return result\n\n def _update_context_by_request_data(self, context, instance, args, kwargs):\n args_len = len(args)\n url = args[1] if args_len > 1 else kwargs.get(\"url\")\n params = args[2] if args_len > 2 else kwargs.get(\"params\")\n body_serialized = args[3] if args_len > 3 else kwargs.get(\"body\")\n\n if \"?\" in url and not params:\n url, qs = url.split(\"?\", 1)\n params = {k: v[0] for k, v in parse_qs(qs).items()}\n\n should_capture_body = bool(should_capture_body_re.search(url))\n\n context[\"db\"] = {\"type\": \"elasticsearch\"}\n if should_capture_body:\n query = []\n # using both q AND body is allowed in some API endpoints / ES versions,\n # but not in others. 
We simply capture both if they are there so the\n # user can see it.\n if params and \"q\" in params:\n # 'q' may already be encoded to a byte string at this point.\n # We assume utf8, which is the default\n q = params[\"q\"]\n if isinstance(q, bytes):\n q = q.decode(\"utf-8\", errors=\"replace\")\n query.append(\"q=\" + q)\n if body_serialized:\n if isinstance(body_serialized, bytes):\n query.append(body_serialized.decode(\"utf-8\", errors=\"replace\"))\n else:\n query.append(body_serialized)\n if query:\n context[\"db\"][\"statement\"] = \"\\n\\n\".join(query)\n\n # ES5: `host` is URL, no `port` attribute\n # ES6, ES7: `host` URL, `hostname` is host, `port` is port\n # ES8: `host` is hostname, no `hostname` attribute, `port` is `port`\n if not hasattr(instance, \"port\"):\n # ES5, parse hostname and port from URL stored in `host`\n parsed_url = urlparse(instance.host)\n host = parsed_url.hostname\n port = parsed_url.port\n elif not hasattr(instance, \"hostname\"):\n # ES8 (and up, one can hope)\n host = instance.host\n port = instance.port\n else:\n # ES6, ES7\n host = instance.hostname\n port = instance.port\n\n context[\"destination\"] = {\"address\": host, \"port\": port}\n\n\nclass ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport\", \"Transport.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch.transport\", \"Transport.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n with elasticapm.capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = wrapped(*args, **kwargs)\n\n hits = self._get_hits(result_data)\n if hits:\n span.context[\"db\"][\"rows_affected\"] = hits\n\n return result_data\n\n def _get_signature(self, args, kwargs):\n args_len = len(args)\n http_method = args[0] if args_len else kwargs.get(\"method\")\n http_path = args[1] if args_len > 1 else kwargs.get(\"url\")\n http_path = http_path.split(\"?\", 1)[0] # we don't want to capture a potential query string in the span name\n\n return \"ES %s %s\" % (http_method, http_path)\n\n def _get_hits(self, result) -> Optional[int]:\n if getattr(result, \"body\", None) and \"hits\" in result.body: # ES >= 8\n return result.body[\"hits\"][\"total\"][\"value\"]\n elif isinstance(result, dict) and \"hits\" in result:\n return (\n result[\"hits\"][\"total\"][\"value\"]\n if isinstance(result[\"hits\"][\"total\"], dict)\n else result[\"hits\"][\"total\"]\n )\n", "path": "elasticapm/instrumentation/packages/elasticsearch.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote 
products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nimport re\nfrom typing import Optional\nfrom urllib.parse import parse_qs, urlparse\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, execution_context\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nshould_capture_body_re = re.compile(\"/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)\")\n\n\nclass ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport._node._http_urllib3\", \"Urllib3HttpNode.perform_request\"),\n (\"elastic_transport._node._http_requests\", \"RequestsHttpNode.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch.connection.http_urllib3\", \"Urllib3HttpConnection.perform_request\"),\n (\"elasticsearch.connection.http_requests\", \"RequestsHttpConnection.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if not span or isinstance(span, DroppedSpan):\n return wrapped(*args, **kwargs)\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n result = wrapped(*args, **kwargs)\n if hasattr(result, \"meta\"): # elasticsearch-py 8.x+\n status_code = result.meta.status\n cluster = result.meta.headers.get(\"x-found-handling-cluster\")\n else:\n status_code = result[0]\n cluster = result[1].get(\"x-found-handling-cluster\")\n span.context[\"http\"] = {\"status_code\": status_code}\n if cluster:\n span.context[\"db\"] = {\"instance\": cluster}\n\n return result\n\n def _update_context_by_request_data(self, context, instance, args, kwargs):\n args_len = len(args)\n url = args[1] if args_len > 1 else kwargs.get(\"url\")\n params = args[2] if args_len > 2 else kwargs.get(\"params\")\n body_serialized = args[3] if args_len > 3 else kwargs.get(\"body\")\n\n if \"?\" in url and not params:\n url, qs = url.split(\"?\", 1)\n params = {k: v[0] for k, v in parse_qs(qs).items()}\n\n should_capture_body = bool(should_capture_body_re.search(url))\n\n context[\"db\"] = {\"type\": \"elasticsearch\"}\n if should_capture_body:\n query = []\n # using both q AND body is allowed in some API endpoints / ES versions,\n # but not in others. 
We simply capture both if they are there so the\n # user can see it.\n if params and \"q\" in params:\n # 'q' may already be encoded to a byte string at this point.\n # We assume utf8, which is the default\n q = params[\"q\"]\n if isinstance(q, bytes):\n q = q.decode(\"utf-8\", errors=\"replace\")\n query.append(\"q=\" + q)\n if body_serialized:\n if isinstance(body_serialized, bytes):\n query.append(body_serialized.decode(\"utf-8\", errors=\"replace\"))\n else:\n query.append(body_serialized)\n if query:\n context[\"db\"][\"statement\"] = \"\\n\\n\".join(query)\n\n # ES5: `host` is URL, no `port` attribute\n # ES6, ES7: `host` URL, `hostname` is host, `port` is port\n # ES8: `host` is hostname, no `hostname` attribute, `port` is `port`\n if not hasattr(instance, \"port\"):\n # ES5, parse hostname and port from URL stored in `host`\n parsed_url = urlparse(instance.host)\n host = parsed_url.hostname\n port = parsed_url.port\n elif not hasattr(instance, \"hostname\"):\n # ES8 (and up, one can hope)\n host = instance.host\n port = instance.port\n else:\n # ES6, ES7\n host = instance.hostname\n port = instance.port\n\n context[\"destination\"] = {\"address\": host, \"port\": port}\n\n\nclass ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport\", \"Transport.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch.transport\", \"Transport.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n with elasticapm.capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = wrapped(*args, **kwargs)\n\n hits = self._get_hits(result_data)\n if hits:\n span.context[\"db\"][\"rows_affected\"] = hits\n\n return result_data\n\n def _get_signature(self, args, kwargs):\n args_len = len(args)\n http_method = args[0] if args_len else kwargs.get(\"method\")\n http_path = args[1] if args_len > 1 else kwargs.get(\"url\")\n http_path = http_path.split(\"?\", 1)[0] # we don't want to capture a potential query string in the span name\n\n return \"ES %s %s\" % (http_method, http_path)\n\n def _get_hits(self, result) -> Optional[int]:\n if getattr(result, \"body\", None) and \"hits\" in result.body: # ES >= 8\n return result.body[\"hits\"][\"total\"][\"value\"]\n elif isinstance(result, dict) and \"hits\" in result:\n return (\n result[\"hits\"][\"total\"][\"value\"]\n if isinstance(result[\"hits\"][\"total\"], dict)\n else result[\"hits\"][\"total\"]\n )\n", "path": "elasticapm/instrumentation/packages/elasticsearch.py"}]} | 2,411 | 184 |
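Editor's note on the record above: the golden diff keys purely off response shape — elasticsearch-py 8.x responses expose a `.meta` object carrying `status` and `headers`, while older clients return a `(status, headers, ...)` tuple — and in both cases the cluster name is read from the `x-found-handling-cluster` response header. A minimal standalone sketch of that branch, assuming nothing beyond what the diff shows (the `modern`/`legacy` objects are illustrative stand-ins, not real client responses):

```
from types import SimpleNamespace

CLUSTER_HEADER = "x-found-handling-cluster"  # header name taken from the golden diff above


def status_and_cluster(result):
    # elasticsearch-py 8.x+: response exposes .meta with status/headers
    if hasattr(result, "meta"):
        return result.meta.status, result.meta.headers.get(CLUSTER_HEADER)
    # older clients: perform_request returns a (status, headers, ...) tuple
    return result[0], result[1].get(CLUSTER_HEADER)


# Illustrative stand-ins for the two response shapes:
modern = SimpleNamespace(meta=SimpleNamespace(status=200,
                                              headers={CLUSTER_HEADER: "prod-es"}))
legacy = (200, {CLUSTER_HEADER: "prod-es"}, b"{}")

assert status_and_cluster(modern) == (200, "prod-es")
assert status_and_cluster(legacy) == (200, "prod-es")
```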
gh_patches_debug_14354 | rasdani/github-patches | git_diff | azavea__raster-vision-746 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Evaluation for object detection with AOIs crashes
```
Running evaluator: ObjectDetectionEvaluator...
2019-03-28 16:47:07:rastervision.evaluation.classification_evaluator: INFO - Computing evaluation for scene 01986917-30ea-4f7f-8e01-985d73b8aa2a...
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 260, in run_command
rv.runner.CommandRunner.run(command_config_uri)
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
CommandRunner.run_from_proto(msg)
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
command.run()
File "/opt/src/rastervision/command/eval_command.py", line 24, in run
evaluator.process(scenes, tmp_dir)
File "/opt/src/rastervision/evaluation/classification_evaluator.py", line 36, in process
scene.aoi_polygons)
File "/opt/src/rastervision/data/label/object_detection_labels.py", line 70, in filter_by_aoi
np.array(new_boxes), np.array(new_class_ids), np.array(new_scores))
File "/opt/src/rastervision/data/label/object_detection_labels.py", line 27, in __init__
self.boxlist = BoxList(npboxes)
File "/opt/tf-models/object_detection/utils/np_box_list.py", line 46, in __init__
raise ValueError('Invalid dimensions for box data.')
ValueError: Invalid dimensions for box data.
/tmp/tmpwie3_vrf/tmp41bj3kgh/Makefile:9: recipe for target '3' failed
make: *** [3] Error 1
make: *** Waiting for unfinished jobs....
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/data/label/object_detection_labels.py`
Content:
```
1 import numpy as np
2 from shapely.geometry import shape
3
4 from rastervision.core.box import Box
5 from rastervision.data.label import Labels
6
7
8 class ObjectDetectionLabels(Labels):
9 """A set of boxes and associated class_ids and scores.
10
11 Implemented using the Tensorflow Object Detection API's BoxList class.
12 """
13
14 def __init__(self, npboxes, class_ids, scores=None):
15 """Construct a set of object detection labels.
16
17 Args:
18 npboxes: float numpy array of size nx4 with cols
19 ymin, xmin, ymax, xmax. Should be in pixel coordinates within
20 the global frame of reference.
21 class_ids: int numpy array of size n with class ids starting at 1
22 scores: float numpy array of size n
23 """
24 # Lazily load TF Object Detection
25 from object_detection.utils.np_box_list import BoxList
26
27 self.boxlist = BoxList(npboxes)
28 # This field name actually needs to be 'classes' to be able to use
29 # certain utility functions in the TF Object Detection API.
30 self.boxlist.add_field('classes', class_ids)
31 # We need to ensure that there is always a scores field so that the
32 # concatenate method will work with empty labels objects.
33 if scores is None:
34 scores = np.ones(class_ids.shape)
35 self.boxlist.add_field('scores', scores)
36
37 def __add__(self, other):
38 return ObjectDetectionLabels.concatenate(self, other)
39
40 def __eq__(self, other):
41 return (isinstance(other, ObjectDetectionLabels)
42 and self.to_dict() == other.to_dict())
43
44 def assert_equal(self, expected_labels):
45 np.testing.assert_array_equal(self.get_npboxes(),
46 expected_labels.get_npboxes())
47 np.testing.assert_array_equal(self.get_class_ids(),
48 expected_labels.get_class_ids())
49 np.testing.assert_array_equal(self.get_scores(),
50 expected_labels.get_scores())
51
52 def filter_by_aoi(self, aoi_polygons):
53 boxes = self.get_boxes()
54 class_ids = self.get_class_ids()
55 scores = self.get_scores()
56
57 new_boxes = []
58 new_class_ids = []
59 new_scores = []
60 for box, class_id, score in zip(boxes, class_ids, scores):
61 box_poly = box.to_shapely()
62 for aoi in aoi_polygons:
63 if box_poly.within(aoi):
64 new_boxes.append(box)
65 new_class_ids.append(class_id)
66 new_scores.append(score)
67 break
68
69 return ObjectDetectionLabels(
70 np.array(new_boxes), np.array(new_class_ids), np.array(new_scores))
71
72 @staticmethod
73 def make_empty():
74 npboxes = np.empty((0, 4))
75 class_ids = np.empty((0, ))
76 scores = np.empty((0, ))
77 return ObjectDetectionLabels(npboxes, class_ids, scores)
78
79 @staticmethod
80 def from_boxlist(boxlist):
81 """Make ObjectDetectionLabels from BoxList object."""
82 scores = (boxlist.get_field('scores')
83 if boxlist.has_field('scores') else None)
84 return ObjectDetectionLabels(
85 boxlist.get(), boxlist.get_field('classes'), scores=scores)
86
87 @staticmethod
88 def from_geojson(geojson, extent=None):
89 """Convert GeoJSON to ObjectDetectionLabels object.
90
91 If extent is provided, filter out the boxes that lie "more than a little
92 bit" outside the extent.
93
94 Args:
95 geojson: (dict) normalized GeoJSON (see VectorSource)
96 extent: (Box) in pixel coords
97
98 Returns:
99 ObjectDetectionLabels
100 """
101 boxes = []
102 class_ids = []
103 scores = []
104
105 for f in geojson['features']:
106 geom = shape(f['geometry'])
107 (xmin, ymin, xmax, ymax) = geom.bounds
108 boxes.append(Box(ymin, xmin, ymax, xmax))
109
110 props = f['properties']
111 class_ids.append(props['class_id'])
112 scores.append(props.get('score', 1.0))
113
114 if len(boxes):
115 boxes = np.array(
116 [box.npbox_format() for box in boxes], dtype=float)
117 class_ids = np.array(class_ids)
118 scores = np.array(scores)
119 labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)
120 else:
121 labels = ObjectDetectionLabels.make_empty()
122
123 if extent is not None:
124 labels = ObjectDetectionLabels.get_overlapping(
125 labels, extent, ioa_thresh=0.8, clip=True)
126 return labels
127
128 def get_boxes(self):
129 """Return list of Boxes."""
130 return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]
131
132 def get_npboxes(self):
133 return self.boxlist.get()
134
135 def get_scores(self):
136 if self.boxlist.has_field('scores'):
137 return self.boxlist.get_field('scores')
138 return None
139
140 def get_class_ids(self):
141 return self.boxlist.get_field('classes')
142
143 def __len__(self):
144 return self.boxlist.get().shape[0]
145
146 def __str__(self):
147 return str(self.boxlist.get())
148
149 def to_boxlist(self):
150 return self.boxlist
151
152 def to_dict(self):
153 """Returns a dict version of these labels.
154
155 The Dict has a Box as a key, and a tuple of (class_id, score)
156 as the values.
157 """
158 d = {}
159 boxes = list(map(Box.from_npbox, self.get_npboxes()))
160 classes = list(self.get_class_ids())
161 scores = list(self.get_scores())
162 for box, class_id, score in zip(boxes, classes, scores):
163 d[box.tuple_format()] = (class_id, score)
164 return d
165
166 @staticmethod
167 def local_to_global(npboxes, window):
168 """Convert from local to global coordinates.
169
170 The local coordinates are row/col within the window frame of reference.
171 The global coordinates are row/col within the extent of a RasterSource.
172 """
173 xmin = window.xmin
174 ymin = window.ymin
175 return npboxes + np.array([[ymin, xmin, ymin, xmin]])
176
177 @staticmethod
178 def global_to_local(npboxes, window):
179 """Convert from global to local coordinates.
180
181 The global coordinates are row/col within the extent of a RasterSource.
182 The local coordinates are row/col within the window frame of reference.
183 """
184 xmin = window.xmin
185 ymin = window.ymin
186 return npboxes - np.array([[ymin, xmin, ymin, xmin]])
187
188 @staticmethod
189 def local_to_normalized(npboxes, window):
190 """Convert from local to normalized coordinates.
191
192 The local coordinates are row/col within the window frame of reference.
193 Normalized coordinates range from 0 to 1 on each (height/width) axis.
194 """
195 height = window.get_height()
196 width = window.get_width()
197 return npboxes / np.array([[height, width, height, width]])
198
199 @staticmethod
200 def normalized_to_local(npboxes, window):
201 """Convert from normalized to local coordinates.
202
203 Normalized coordinates range from 0 to 1 on each (height/width) axis.
204 The local coordinates are row/col within the window frame of reference.
205 """
206 height = window.get_height()
207 width = window.get_width()
208 return npboxes * np.array([[height, width, height, width]])
209
210 @staticmethod
211 def get_overlapping(labels, window, ioa_thresh=0.000001, clip=False):
212 """Return subset of labels that overlap with window.
213
214 Args:
215 labels: ObjectDetectionLabels
216 window: Box
217 ioa_thresh: the minimum IOA for a box to be considered as
218 overlapping
219 clip: if True, clip label boxes to the window
220 """
221 # Lazily load TF Object Detection
222 from object_detection.utils.np_box_list import BoxList
223 from object_detection.utils.np_box_list_ops import (
224 prune_non_overlapping_boxes, clip_to_window)
225
226 window_npbox = window.npbox_format()
227 window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))
228 boxlist = prune_non_overlapping_boxes(
229 labels.boxlist, window_boxlist, minoverlap=ioa_thresh)
230 if clip:
231 boxlist = clip_to_window(boxlist, window_npbox)
232
233 return ObjectDetectionLabels.from_boxlist(boxlist)
234
235 @staticmethod
236 def concatenate(labels1, labels2):
237 """Return concatenation of labels.
238
239 Args:
240 labels1: ObjectDetectionLabels
241 labels2: ObjectDetectionLabels
242 """
243 from object_detection.utils.np_box_list_ops import concatenate
244
245 new_boxlist = concatenate([labels1.to_boxlist(), labels2.to_boxlist()])
246 return ObjectDetectionLabels.from_boxlist(new_boxlist)
247
248 @staticmethod
249 def prune_duplicates(labels, score_thresh, merge_thresh):
250 """Remove duplicate boxes.
251
252 Runs non-maximum suppression to remove duplicate boxes that result from
253 sliding window prediction algorithm.
254
255 Args:
256 labels: ObjectDetectionLabels
257 score_thresh: the minimum allowed score of boxes
258 merge_thresh: the minimum IOA allowed when merging two boxes
259 together
260
261 Returns:
262 ObjectDetectionLabels
263 """
264 from object_detection.utils.np_box_list_ops import non_max_suppression
265
266 max_output_size = 1000000
267 pruned_boxlist = non_max_suppression(
268 labels.boxlist,
269 max_output_size=max_output_size,
270 iou_threshold=merge_thresh,
271 score_threshold=score_thresh)
272 return ObjectDetectionLabels.from_boxlist(pruned_boxlist)
273
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision/data/label/object_detection_labels.py b/rastervision/data/label/object_detection_labels.py
--- a/rastervision/data/label/object_detection_labels.py
+++ b/rastervision/data/label/object_detection_labels.py
@@ -61,11 +61,14 @@
box_poly = box.to_shapely()
for aoi in aoi_polygons:
if box_poly.within(aoi):
- new_boxes.append(box)
+ new_boxes.append(box.npbox_format())
new_class_ids.append(class_id)
new_scores.append(score)
break
+ if len(new_boxes) == 0:
+ return ObjectDetectionLabels.make_empty()
+
return ObjectDetectionLabels(
np.array(new_boxes), np.array(new_class_ids), np.array(new_scores))
| {"golden_diff": "diff --git a/rastervision/data/label/object_detection_labels.py b/rastervision/data/label/object_detection_labels.py\n--- a/rastervision/data/label/object_detection_labels.py\n+++ b/rastervision/data/label/object_detection_labels.py\n@@ -61,11 +61,14 @@\n box_poly = box.to_shapely()\n for aoi in aoi_polygons:\n if box_poly.within(aoi):\n- new_boxes.append(box)\n+ new_boxes.append(box.npbox_format())\n new_class_ids.append(class_id)\n new_scores.append(score)\n break\n \n+ if len(new_boxes) == 0:\n+ return ObjectDetectionLabels.make_empty()\n+\n return ObjectDetectionLabels(\n np.array(new_boxes), np.array(new_class_ids), np.array(new_scores))\n", "issue": "Evaluation for object detection with AOIs crashes\n```\r\nRunning evaluator: ObjectDetectionEvaluator...\r\n2019-03-28 16:47:07:rastervision.evaluation.classification_evaluator: INFO - Computing evaluation for scene 01986917-30ea-4f7f-8e01-985d73b8aa2a...\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.5/runpy.py\", line 184, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/python3.5/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/opt/src/rastervision/__main__.py\", line 17, in <module>\r\n rv.main()\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 1066, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/opt/src/rastervision/cli/main.py\", line 260, in run_command\r\n rv.runner.CommandRunner.run(command_config_uri)\r\n File \"/opt/src/rastervision/runner/command_runner.py\", line 11, in run\r\n CommandRunner.run_from_proto(msg)\r\n File \"/opt/src/rastervision/runner/command_runner.py\", line 17, in run_from_proto\r\n command.run()\r\n File \"/opt/src/rastervision/command/eval_command.py\", line 24, in run\r\n evaluator.process(scenes, tmp_dir)\r\n File \"/opt/src/rastervision/evaluation/classification_evaluator.py\", line 36, in process\r\n scene.aoi_polygons)\r\n File \"/opt/src/rastervision/data/label/object_detection_labels.py\", line 70, in filter_by_aoi\r\n np.array(new_boxes), np.array(new_class_ids), np.array(new_scores))\r\n File \"/opt/src/rastervision/data/label/object_detection_labels.py\", line 27, in __init__\r\n self.boxlist = BoxList(npboxes)\r\n File \"/opt/tf-models/object_detection/utils/np_box_list.py\", line 46, in __init__\r\n raise ValueError('Invalid dimensions for box data.')\r\nValueError: Invalid dimensions for box data.\r\n/tmp/tmpwie3_vrf/tmp41bj3kgh/Makefile:9: recipe for target '3' failed\r\nmake: *** [3] Error 1\r\nmake: *** Waiting for unfinished jobs....\r\n```\n", "before_files": [{"content": "import numpy as np\nfrom shapely.geometry import shape\n\nfrom rastervision.core.box import Box\nfrom rastervision.data.label import Labels\n\n\nclass ObjectDetectionLabels(Labels):\n \"\"\"A set of boxes and associated class_ids and scores.\n\n Implemented using the Tensorflow Object Detection API's BoxList class.\n \"\"\"\n\n def __init__(self, npboxes, class_ids, scores=None):\n 
\"\"\"Construct a set of object detection labels.\n\n Args:\n npboxes: float numpy array of size nx4 with cols\n ymin, xmin, ymax, xmax. Should be in pixel coordinates within\n the global frame of reference.\n class_ids: int numpy array of size n with class ids starting at 1\n scores: float numpy array of size n\n \"\"\"\n # Lazily load TF Object Detection\n from object_detection.utils.np_box_list import BoxList\n\n self.boxlist = BoxList(npboxes)\n # This field name actually needs to be 'classes' to be able to use\n # certain utility functions in the TF Object Detection API.\n self.boxlist.add_field('classes', class_ids)\n # We need to ensure that there is always a scores field so that the\n # concatenate method will work with empty labels objects.\n if scores is None:\n scores = np.ones(class_ids.shape)\n self.boxlist.add_field('scores', scores)\n\n def __add__(self, other):\n return ObjectDetectionLabels.concatenate(self, other)\n\n def __eq__(self, other):\n return (isinstance(other, ObjectDetectionLabels)\n and self.to_dict() == other.to_dict())\n\n def assert_equal(self, expected_labels):\n np.testing.assert_array_equal(self.get_npboxes(),\n expected_labels.get_npboxes())\n np.testing.assert_array_equal(self.get_class_ids(),\n expected_labels.get_class_ids())\n np.testing.assert_array_equal(self.get_scores(),\n expected_labels.get_scores())\n\n def filter_by_aoi(self, aoi_polygons):\n boxes = self.get_boxes()\n class_ids = self.get_class_ids()\n scores = self.get_scores()\n\n new_boxes = []\n new_class_ids = []\n new_scores = []\n for box, class_id, score in zip(boxes, class_ids, scores):\n box_poly = box.to_shapely()\n for aoi in aoi_polygons:\n if box_poly.within(aoi):\n new_boxes.append(box)\n new_class_ids.append(class_id)\n new_scores.append(score)\n break\n\n return ObjectDetectionLabels(\n np.array(new_boxes), np.array(new_class_ids), np.array(new_scores))\n\n @staticmethod\n def make_empty():\n npboxes = np.empty((0, 4))\n class_ids = np.empty((0, ))\n scores = np.empty((0, ))\n return ObjectDetectionLabels(npboxes, class_ids, scores)\n\n @staticmethod\n def from_boxlist(boxlist):\n \"\"\"Make ObjectDetectionLabels from BoxList object.\"\"\"\n scores = (boxlist.get_field('scores')\n if boxlist.has_field('scores') else None)\n return ObjectDetectionLabels(\n boxlist.get(), boxlist.get_field('classes'), scores=scores)\n\n @staticmethod\n def from_geojson(geojson, extent=None):\n \"\"\"Convert GeoJSON to ObjectDetectionLabels object.\n\n If extent is provided, filter out the boxes that lie \"more than a little\n bit\" outside the extent.\n\n Args:\n geojson: (dict) normalized GeoJSON (see VectorSource)\n extent: (Box) in pixel coords\n\n Returns:\n ObjectDetectionLabels\n \"\"\"\n boxes = []\n class_ids = []\n scores = []\n\n for f in geojson['features']:\n geom = shape(f['geometry'])\n (xmin, ymin, xmax, ymax) = geom.bounds\n boxes.append(Box(ymin, xmin, ymax, xmax))\n\n props = f['properties']\n class_ids.append(props['class_id'])\n scores.append(props.get('score', 1.0))\n\n if len(boxes):\n boxes = np.array(\n [box.npbox_format() for box in boxes], dtype=float)\n class_ids = np.array(class_ids)\n scores = np.array(scores)\n labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)\n else:\n labels = ObjectDetectionLabels.make_empty()\n\n if extent is not None:\n labels = ObjectDetectionLabels.get_overlapping(\n labels, extent, ioa_thresh=0.8, clip=True)\n return labels\n\n def get_boxes(self):\n \"\"\"Return list of Boxes.\"\"\"\n return [Box.from_npbox(npbox) for npbox 
in self.boxlist.get()]\n\n def get_npboxes(self):\n return self.boxlist.get()\n\n def get_scores(self):\n if self.boxlist.has_field('scores'):\n return self.boxlist.get_field('scores')\n return None\n\n def get_class_ids(self):\n return self.boxlist.get_field('classes')\n\n def __len__(self):\n return self.boxlist.get().shape[0]\n\n def __str__(self):\n return str(self.boxlist.get())\n\n def to_boxlist(self):\n return self.boxlist\n\n def to_dict(self):\n \"\"\"Returns a dict version of these labels.\n\n The Dict has a Box as a key, and a tuple of (class_id, score)\n as the values.\n \"\"\"\n d = {}\n boxes = list(map(Box.from_npbox, self.get_npboxes()))\n classes = list(self.get_class_ids())\n scores = list(self.get_scores())\n for box, class_id, score in zip(boxes, classes, scores):\n d[box.tuple_format()] = (class_id, score)\n return d\n\n @staticmethod\n def local_to_global(npboxes, window):\n \"\"\"Convert from local to global coordinates.\n\n The local coordinates are row/col within the window frame of reference.\n The global coordinates are row/col within the extent of a RasterSource.\n \"\"\"\n xmin = window.xmin\n ymin = window.ymin\n return npboxes + np.array([[ymin, xmin, ymin, xmin]])\n\n @staticmethod\n def global_to_local(npboxes, window):\n \"\"\"Convert from global to local coordinates.\n\n The global coordinates are row/col within the extent of a RasterSource.\n The local coordinates are row/col within the window frame of reference.\n \"\"\"\n xmin = window.xmin\n ymin = window.ymin\n return npboxes - np.array([[ymin, xmin, ymin, xmin]])\n\n @staticmethod\n def local_to_normalized(npboxes, window):\n \"\"\"Convert from local to normalized coordinates.\n\n The local coordinates are row/col within the window frame of reference.\n Normalized coordinates range from 0 to 1 on each (height/width) axis.\n \"\"\"\n height = window.get_height()\n width = window.get_width()\n return npboxes / np.array([[height, width, height, width]])\n\n @staticmethod\n def normalized_to_local(npboxes, window):\n \"\"\"Convert from normalized to local coordinates.\n\n Normalized coordinates range from 0 to 1 on each (height/width) axis.\n The local coordinates are row/col within the window frame of reference.\n \"\"\"\n height = window.get_height()\n width = window.get_width()\n return npboxes * np.array([[height, width, height, width]])\n\n @staticmethod\n def get_overlapping(labels, window, ioa_thresh=0.000001, clip=False):\n \"\"\"Return subset of labels that overlap with window.\n\n Args:\n labels: ObjectDetectionLabels\n window: Box\n ioa_thresh: the minimum IOA for a box to be considered as\n overlapping\n clip: if True, clip label boxes to the window\n \"\"\"\n # Lazily load TF Object Detection\n from object_detection.utils.np_box_list import BoxList\n from object_detection.utils.np_box_list_ops import (\n prune_non_overlapping_boxes, clip_to_window)\n\n window_npbox = window.npbox_format()\n window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))\n boxlist = prune_non_overlapping_boxes(\n labels.boxlist, window_boxlist, minoverlap=ioa_thresh)\n if clip:\n boxlist = clip_to_window(boxlist, window_npbox)\n\n return ObjectDetectionLabels.from_boxlist(boxlist)\n\n @staticmethod\n def concatenate(labels1, labels2):\n \"\"\"Return concatenation of labels.\n\n Args:\n labels1: ObjectDetectionLabels\n labels2: ObjectDetectionLabels\n \"\"\"\n from object_detection.utils.np_box_list_ops import concatenate\n\n new_boxlist = concatenate([labels1.to_boxlist(), labels2.to_boxlist()])\n return 
ObjectDetectionLabels.from_boxlist(new_boxlist)\n\n @staticmethod\n def prune_duplicates(labels, score_thresh, merge_thresh):\n \"\"\"Remove duplicate boxes.\n\n Runs non-maximum suppression to remove duplicate boxes that result from\n sliding window prediction algorithm.\n\n Args:\n labels: ObjectDetectionLabels\n score_thresh: the minimum allowed score of boxes\n merge_thresh: the minimum IOA allowed when merging two boxes\n together\n\n Returns:\n ObjectDetectionLabels\n \"\"\"\n from object_detection.utils.np_box_list_ops import non_max_suppression\n\n max_output_size = 1000000\n pruned_boxlist = non_max_suppression(\n labels.boxlist,\n max_output_size=max_output_size,\n iou_threshold=merge_thresh,\n score_threshold=score_thresh)\n return ObjectDetectionLabels.from_boxlist(pruned_boxlist)\n", "path": "rastervision/data/label/object_detection_labels.py"}], "after_files": [{"content": "import numpy as np\nfrom shapely.geometry import shape\n\nfrom rastervision.core.box import Box\nfrom rastervision.data.label import Labels\n\n\nclass ObjectDetectionLabels(Labels):\n \"\"\"A set of boxes and associated class_ids and scores.\n\n Implemented using the Tensorflow Object Detection API's BoxList class.\n \"\"\"\n\n def __init__(self, npboxes, class_ids, scores=None):\n \"\"\"Construct a set of object detection labels.\n\n Args:\n npboxes: float numpy array of size nx4 with cols\n ymin, xmin, ymax, xmax. Should be in pixel coordinates within\n the global frame of reference.\n class_ids: int numpy array of size n with class ids starting at 1\n scores: float numpy array of size n\n \"\"\"\n # Lazily load TF Object Detection\n from object_detection.utils.np_box_list import BoxList\n\n self.boxlist = BoxList(npboxes)\n # This field name actually needs to be 'classes' to be able to use\n # certain utility functions in the TF Object Detection API.\n self.boxlist.add_field('classes', class_ids)\n # We need to ensure that there is always a scores field so that the\n # concatenate method will work with empty labels objects.\n if scores is None:\n scores = np.ones(class_ids.shape)\n self.boxlist.add_field('scores', scores)\n\n def __add__(self, other):\n return ObjectDetectionLabels.concatenate(self, other)\n\n def __eq__(self, other):\n return (isinstance(other, ObjectDetectionLabels)\n and self.to_dict() == other.to_dict())\n\n def assert_equal(self, expected_labels):\n np.testing.assert_array_equal(self.get_npboxes(),\n expected_labels.get_npboxes())\n np.testing.assert_array_equal(self.get_class_ids(),\n expected_labels.get_class_ids())\n np.testing.assert_array_equal(self.get_scores(),\n expected_labels.get_scores())\n\n def filter_by_aoi(self, aoi_polygons):\n boxes = self.get_boxes()\n class_ids = self.get_class_ids()\n scores = self.get_scores()\n\n new_boxes = []\n new_class_ids = []\n new_scores = []\n for box, class_id, score in zip(boxes, class_ids, scores):\n box_poly = box.to_shapely()\n for aoi in aoi_polygons:\n if box_poly.within(aoi):\n new_boxes.append(box.npbox_format())\n new_class_ids.append(class_id)\n new_scores.append(score)\n break\n\n if len(new_boxes) == 0:\n return ObjectDetectionLabels.make_empty()\n\n return ObjectDetectionLabels(\n np.array(new_boxes), np.array(new_class_ids), np.array(new_scores))\n\n @staticmethod\n def make_empty():\n npboxes = np.empty((0, 4))\n class_ids = np.empty((0, ))\n scores = np.empty((0, ))\n return ObjectDetectionLabels(npboxes, class_ids, scores)\n\n @staticmethod\n def from_boxlist(boxlist):\n \"\"\"Make ObjectDetectionLabels from BoxList 
object.\"\"\"\n scores = (boxlist.get_field('scores')\n if boxlist.has_field('scores') else None)\n return ObjectDetectionLabels(\n boxlist.get(), boxlist.get_field('classes'), scores=scores)\n\n @staticmethod\n def from_geojson(geojson, extent=None):\n \"\"\"Convert GeoJSON to ObjectDetectionLabels object.\n\n If extent is provided, filter out the boxes that lie \"more than a little\n bit\" outside the extent.\n\n Args:\n geojson: (dict) normalized GeoJSON (see VectorSource)\n extent: (Box) in pixel coords\n\n Returns:\n ObjectDetectionLabels\n \"\"\"\n boxes = []\n class_ids = []\n scores = []\n\n for f in geojson['features']:\n geom = shape(f['geometry'])\n (xmin, ymin, xmax, ymax) = geom.bounds\n boxes.append(Box(ymin, xmin, ymax, xmax))\n\n props = f['properties']\n class_ids.append(props['class_id'])\n scores.append(props.get('score', 1.0))\n\n if len(boxes):\n boxes = np.array(\n [box.npbox_format() for box in boxes], dtype=float)\n class_ids = np.array(class_ids)\n scores = np.array(scores)\n labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)\n else:\n labels = ObjectDetectionLabels.make_empty()\n\n if extent is not None:\n labels = ObjectDetectionLabels.get_overlapping(\n labels, extent, ioa_thresh=0.8, clip=True)\n return labels\n\n def get_boxes(self):\n \"\"\"Return list of Boxes.\"\"\"\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]\n\n def get_npboxes(self):\n return self.boxlist.get()\n\n def get_scores(self):\n if self.boxlist.has_field('scores'):\n return self.boxlist.get_field('scores')\n return None\n\n def get_class_ids(self):\n return self.boxlist.get_field('classes')\n\n def __len__(self):\n return self.boxlist.get().shape[0]\n\n def __str__(self):\n return str(self.boxlist.get())\n\n def to_boxlist(self):\n return self.boxlist\n\n def to_dict(self):\n \"\"\"Returns a dict version of these labels.\n\n The Dict has a Box as a key, and a tuple of (class_id, score)\n as the values.\n \"\"\"\n d = {}\n boxes = list(map(Box.from_npbox, self.get_npboxes()))\n classes = list(self.get_class_ids())\n scores = list(self.get_scores())\n for box, class_id, score in zip(boxes, classes, scores):\n d[box.tuple_format()] = (class_id, score)\n return d\n\n @staticmethod\n def local_to_global(npboxes, window):\n \"\"\"Convert from local to global coordinates.\n\n The local coordinates are row/col within the window frame of reference.\n The global coordinates are row/col within the extent of a RasterSource.\n \"\"\"\n xmin = window.xmin\n ymin = window.ymin\n return npboxes + np.array([[ymin, xmin, ymin, xmin]])\n\n @staticmethod\n def global_to_local(npboxes, window):\n \"\"\"Convert from global to local coordinates.\n\n The global coordinates are row/col within the extent of a RasterSource.\n The local coordinates are row/col within the window frame of reference.\n \"\"\"\n xmin = window.xmin\n ymin = window.ymin\n return npboxes - np.array([[ymin, xmin, ymin, xmin]])\n\n @staticmethod\n def local_to_normalized(npboxes, window):\n \"\"\"Convert from local to normalized coordinates.\n\n The local coordinates are row/col within the window frame of reference.\n Normalized coordinates range from 0 to 1 on each (height/width) axis.\n \"\"\"\n height = window.get_height()\n width = window.get_width()\n return npboxes / np.array([[height, width, height, width]])\n\n @staticmethod\n def normalized_to_local(npboxes, window):\n \"\"\"Convert from normalized to local coordinates.\n\n Normalized coordinates range from 0 to 1 on each (height/width) axis.\n The local 
coordinates are row/col within the window frame of reference.\n \"\"\"\n height = window.get_height()\n width = window.get_width()\n return npboxes * np.array([[height, width, height, width]])\n\n @staticmethod\n def get_overlapping(labels, window, ioa_thresh=0.000001, clip=False):\n \"\"\"Return subset of labels that overlap with window.\n\n Args:\n labels: ObjectDetectionLabels\n window: Box\n ioa_thresh: the minimum IOA for a box to be considered as\n overlapping\n clip: if True, clip label boxes to the window\n \"\"\"\n # Lazily load TF Object Detection\n from object_detection.utils.np_box_list import BoxList\n from object_detection.utils.np_box_list_ops import (\n prune_non_overlapping_boxes, clip_to_window)\n\n window_npbox = window.npbox_format()\n window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))\n boxlist = prune_non_overlapping_boxes(\n labels.boxlist, window_boxlist, minoverlap=ioa_thresh)\n if clip:\n boxlist = clip_to_window(boxlist, window_npbox)\n\n return ObjectDetectionLabels.from_boxlist(boxlist)\n\n @staticmethod\n def concatenate(labels1, labels2):\n \"\"\"Return concatenation of labels.\n\n Args:\n labels1: ObjectDetectionLabels\n labels2: ObjectDetectionLabels\n \"\"\"\n from object_detection.utils.np_box_list_ops import concatenate\n\n new_boxlist = concatenate([labels1.to_boxlist(), labels2.to_boxlist()])\n return ObjectDetectionLabels.from_boxlist(new_boxlist)\n\n @staticmethod\n def prune_duplicates(labels, score_thresh, merge_thresh):\n \"\"\"Remove duplicate boxes.\n\n Runs non-maximum suppression to remove duplicate boxes that result from\n sliding window prediction algorithm.\n\n Args:\n labels: ObjectDetectionLabels\n score_thresh: the minimum allowed score of boxes\n merge_thresh: the minimum IOA allowed when merging two boxes\n together\n\n Returns:\n ObjectDetectionLabels\n \"\"\"\n from object_detection.utils.np_box_list_ops import non_max_suppression\n\n max_output_size = 1000000\n pruned_boxlist = non_max_suppression(\n labels.boxlist,\n max_output_size=max_output_size,\n iou_threshold=merge_thresh,\n score_threshold=score_thresh)\n return ObjectDetectionLabels.from_boxlist(pruned_boxlist)\n", "path": "rastervision/data/label/object_detection_labels.py"}]} | 3,775 | 178 |
gh_patches_debug_4397 | rasdani/github-patches | git_diff | gratipay__gratipay.com-1934 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Signing up with a Twitter account fails.
```
Internal server error, program!
Traceback (most recent call last):
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/algorithm.py", line 288, in run
new_state = function(**deps.as_kwargs)
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/algorithms/website.py", line 88, in get_response_for_resource
return {'response': resource.respond(request)}
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py", line 52, in respond
exec self.pages[1] in context
File "/home/sim6/www.gittip.com/www/on/twitter/associate.spt", line 77, in
account = twitter.TwitterAccount(website.db, user_info['id'], user_info)
File "/home/sim6/www.gittip.com/gittip/elsewhere/__init__.py", line 80, in __init__
typecheck(user_id, (int, unicode), user_info, (None, dict))
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/utils.py", line 377, in typecheck
raise TypeError(msg)
TypeError: Check #1: 2304869623L is of type long, not one of: int, unicode.
```
--- END ISSUE ---
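For context, the failure is straightforward to reproduce with a minimal Python 2 sketch. The `typecheck` helper below is a simplified stand-in for `aspen.utils.typecheck` (not its real signature), and the ID is forced to `long`, as happens when a large Twitter ID is parsed on the reporter's system:

```python
# Minimal sketch, assuming Python 2 (where `long` and `unicode` exist).
# `typecheck` here is a simplified stand-in for aspen.utils.typecheck.

def typecheck(value, types):
    if not isinstance(value, types):
        raise TypeError("Check #1: %r is of type %s, not one of: %s" % (
            value, type(value).__name__, ", ".join(t.__name__ for t in types)))

twitter_user_id = long(2304869623)  # large Twitter IDs can arrive as long, not int

try:
    typecheck(twitter_user_id, (int, unicode))    # the current check: raises TypeError
except TypeError as exc:
    print(exc)

typecheck(twitter_user_id, (int, unicode, long))  # with long allowed: passes
```

Allowing `long` among the accepted types, as the patch below does, is enough to make the check pass.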
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/elsewhere/__init__.py`
Content:
```
1 """This subpackage contains functionality for working with accounts elsewhere.
2 """
3 from __future__ import print_function, unicode_literals
4 from collections import OrderedDict
5
6 from aspen.utils import typecheck
7 from aspen import json
8 from psycopg2 import IntegrityError
9
10 import gittip
11 from gittip.exceptions import ProblemChangingUsername, UnknownPlatform
12 from gittip.utils.username import reserve_a_random_username
13
14
15 ACTIONS = [u'opt-in', u'connect', u'lock', u'unlock']
16
17
18 # to add a new elsewhere/platform:
19 # 1) add its name (also the name of its module) to this list.
20 # it's best to append it; this ordering is used in templates.
21 # 2) inherit from AccountElsewhere in the platform class
22 #
23 # platform_modules will populate the platform class automatically in configure-aspen.
24 platforms_ordered = (
25 'twitter',
26 'github',
27 'bitbucket',
28 'bountysource',
29 'venmo',
30 'openstreetmap'
31 )
32
33 # init-time key setup ensures the future ordering of platform_classes will match
34 # platforms_ordered, since overwriting entries will maintain their order.
35 platform_classes = OrderedDict([(platform, None) for platform in platforms_ordered])
36
37
38 class _RegisterPlatformMeta(type):
39 """Tied to AccountElsewhere to enable registration by the platform field.
40 """
41
42 def __new__(cls, name, bases, dct):
43 c = super(_RegisterPlatformMeta, cls).__new__(cls, name, bases, dct)
44
45 # * register the platform
46 # * verify it was added at init-time
47 # * register the subclass's json encoder with aspen
48 c_platform = getattr(c, 'platform')
49 if name == 'AccountElsewhere':
50 pass
51 elif c_platform not in platform_classes:
52 raise UnknownPlatform(c_platform) # has it been added to platform_classes init?
53 else:
54 platform_classes[c_platform] = c
55
56 # aspen's json encoder registry does not take class hierarchies into account,
57 # so we need to register the subclasses explicitly.
58 json.register_encoder(c, c.to_json_compatible_object)
59
60 return c
61
62 class AccountElsewhere(object):
63
64 __metaclass__ = _RegisterPlatformMeta
65
66 platform = None # set in subclass
67
68 # only fields in this set will be encoded
69 json_encode_field_whitelist = set([
70 'id', 'is_locked', 'participant', 'platform', 'user_id', 'user_info',
71 ])
72
73 def __init__(self, db, user_id, user_info=None, existing_record=None):
74 """Either:
75 - Takes a user_id and user_info, and updates the database.
76
77 Or:
78 - Takes a user_id and existing_record, and constructs a "model" object out of the record
79 """
80 typecheck(user_id, (int, unicode), user_info, (None, dict))
81 self.user_id = unicode(user_id)
82 self.db = db
83
84 if user_info is not None:
85 a,b,c,d = self.upsert(user_info)
86
87 self.participant = a
88 self.is_claimed = b
89 self.is_locked = c
90 self.balance = d
91
92 self.user_info = user_info
93
94 # hack to make this into a weird pseudo-model that can share convenience methods
95 elif existing_record is not None:
96 self.participant = existing_record.participant
97 self.is_claimed, self.is_locked, self.balance = self.get_misc_info(self.participant)
98 self.user_info = existing_record.user_info
99 self.record = existing_record
100
101 def to_json_compatible_object(self):
102 """
103 This is registered as an aspen.json encoder in configure-aspen
104 for all subclasses of this class.
105
106 It only exports fields in the whitelist.
107 """
108 output = {k: v for (k,v) in self.record._asdict().items()
109 if k in self.json_encode_field_whitelist}
110
111 return output
112
113 def set_is_locked(self, is_locked):
114 self.db.run("""
115
116 UPDATE elsewhere
117 SET is_locked=%s
118 WHERE platform=%s AND user_id=%s
119
120 """, (is_locked, self.platform, self.user_id))
121
122
123 def opt_in(self, desired_username):
124 """Given a desired username, return a User object.
125 """
126 from gittip.security.user import User
127
128 self.set_is_locked(False)
129 user = User.from_username(self.participant)
130 user.sign_in()
131 assert not user.ANON, self.participant # sanity check
132 if self.is_claimed:
133 newly_claimed = False
134 else:
135 newly_claimed = True
136 user.participant.set_as_claimed()
137 try:
138 user.participant.change_username(desired_username)
139 except ProblemChangingUsername:
140 pass
141 return user, newly_claimed
142
143
144 def upsert(self, user_info):
145 """Given a dict, return a tuple.
146
147 User_id is an immutable unique identifier for the given user on the
148 given platform. Username is the user's login/username on the given
149 platform. It is only used here for logging. Specifically, we don't
150 reserve their username for them on Gittip if they're new here. We give
151 them a random username here, and they'll have a chance to change it
152 if/when they opt in. User_id and username may or may not be the same.
153 User_info is a dictionary of profile info per the named platform. All
154 platform dicts must have an id key that corresponds to the primary key
155 in the underlying table in our own db.
156
157 The return value is a tuple: (username [unicode], is_claimed [boolean],
158 is_locked [boolean], balance [Decimal]).
159
160 """
161 typecheck(user_info, dict)
162
163
164 # Insert the account if needed.
165 # =============================
166 # Do this with a transaction so that if the insert fails, the
167 # participant we reserved for them is rolled back as well.
168
169 try:
170 with self.db.get_cursor() as cursor:
171 _username = reserve_a_random_username(cursor)
172 cursor.execute( "INSERT INTO elsewhere "
173 "(platform, user_id, participant) "
174 "VALUES (%s, %s, %s)"
175 , (self.platform, self.user_id, _username)
176 )
177 except IntegrityError:
178 pass
179
180
181 # Update their user_info.
182 # =======================
183 # Cast everything to unicode, because (I believe) hstore can take any
184 # type of value, but psycopg2 can't.
185 #
186 # https://postgres.heroku.com/blog/past/2012/3/14/introducing_keyvalue_data_storage_in_heroku_postgres/
187 # http://initd.org/psycopg/docs/extras.html#hstore-data-type
188 #
189 # XXX This clobbers things, of course, such as booleans. See
190 # /on/bitbucket/%username/index.html
191
192 for k, v in user_info.items():
193 user_info[k] = unicode(v)
194
195
196 username = self.db.one("""
197
198 UPDATE elsewhere
199 SET user_info=%s
200 WHERE platform=%s AND user_id=%s
201 RETURNING participant
202
203 """, (user_info, self.platform, self.user_id))
204
205 return (username,) + self.get_misc_info(username)
206
207 def get_misc_info(self, username):
208 rec = self.db.one("""
209
210 SELECT claimed_time, balance, is_locked
211 FROM participants
212 JOIN elsewhere
213 ON participants.username=participant
214 WHERE platform=%s
215 AND participants.username=%s
216
217 """, (self.platform, username))
218
219 assert rec is not None # sanity check
220
221 return ( rec.claimed_time is not None
222 , rec.is_locked
223 , rec.balance
224 )
225
226 def set_oauth_tokens(self, access_token, refresh_token, expires):
227 """
228 Updates the elsewhere row with the given access token, refresh token, and Python datetime
229 """
230
231 self.db.run("""
232 UPDATE elsewhere
233 SET (access_token, refresh_token, expires)
234 = (%s, %s, %s)
235 WHERE platform=%s AND user_id=%s
236 """, (access_token, refresh_token, expires, self.platform, self.user_id))
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gittip/elsewhere/__init__.py b/gittip/elsewhere/__init__.py
--- a/gittip/elsewhere/__init__.py
+++ b/gittip/elsewhere/__init__.py
@@ -77,7 +77,7 @@
Or:
- Takes a user_id and existing_record, and constructs a "model" object out of the record
"""
- typecheck(user_id, (int, unicode), user_info, (None, dict))
+ typecheck(user_id, (int, unicode, long), user_info, (None, dict))
self.user_id = unicode(user_id)
self.db = db
| {"golden_diff": "diff --git a/gittip/elsewhere/__init__.py b/gittip/elsewhere/__init__.py\n--- a/gittip/elsewhere/__init__.py\n+++ b/gittip/elsewhere/__init__.py\n@@ -77,7 +77,7 @@\n Or:\n - Takes a user_id and existing_record, and constructs a \"model\" object out of the record\n \"\"\"\n- typecheck(user_id, (int, unicode), user_info, (None, dict))\n+ typecheck(user_id, (int, unicode, long), user_info, (None, dict))\n self.user_id = unicode(user_id)\n self.db = db\n", "issue": "Sign up with a Twitter account fails.\n```\nInternal server error, program!\nTraceback (most recent call last):\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/algorithm.py\", line 288, in run\n new_state = function(**deps.as_kwargs)\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/algorithms/website.py\", line 88, in get_response_for_resource\n return {'response': resource.respond(request)}\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py\", line 52, in respond\n exec self.pages[1] in context\n File \"/home/sim6/www.gittip.com/www/on/twitter/associate.spt\", line 77, in \n account = twitter.TwitterAccount(website.db, user_info['id'], user_info)\n File \"/home/sim6/www.gittip.com/gittip/elsewhere/__init__.py\", line 80, in __init__\n typecheck(user_id, (int, unicode), user_info, (None, dict))\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/utils.py\", line 377, in typecheck\n raise TypeError(msg)\nTypeError: Check #1: 2304869623L is of type long, not one of: int, unicode.\n```\n\n", "before_files": [{"content": "\"\"\"This subpackage contains functionality for working with accounts elsewhere.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\nfrom collections import OrderedDict\n\nfrom aspen.utils import typecheck\nfrom aspen import json\nfrom psycopg2 import IntegrityError\n\nimport gittip\nfrom gittip.exceptions import ProblemChangingUsername, UnknownPlatform\nfrom gittip.utils.username import reserve_a_random_username\n\n\nACTIONS = [u'opt-in', u'connect', u'lock', u'unlock']\n\n\n# to add a new elsewhere/platform:\n# 1) add its name (also the name of its module) to this list.\n# it's best to append it; this ordering is used in templates.\n# 2) inherit from AccountElsewhere in the platform class\n#\n# platform_modules will populate the platform class automatically in configure-aspen.\nplatforms_ordered = (\n 'twitter',\n 'github',\n 'bitbucket',\n 'bountysource',\n 'venmo',\n 'openstreetmap'\n)\n\n# init-time key setup ensures the future ordering of platform_classes will match\n# platforms_ordered, since overwriting entries will maintain their order.\nplatform_classes = OrderedDict([(platform, None) for platform in platforms_ordered])\n\n\nclass _RegisterPlatformMeta(type):\n \"\"\"Tied to AccountElsewhere to enable registration by the platform field.\n \"\"\"\n\n def __new__(cls, name, bases, dct):\n c = super(_RegisterPlatformMeta, cls).__new__(cls, name, bases, dct)\n\n # * register the platform\n # * verify it was added at init-time\n # * register the subclass's json encoder with aspen\n c_platform = getattr(c, 'platform')\n if name == 'AccountElsewhere':\n pass\n elif c_platform not in platform_classes:\n raise UnknownPlatform(c_platform) # has it been added to platform_classes init?\n else:\n platform_classes[c_platform] = c\n\n # aspen's json encoder registry does not take class hierarchies into account,\n # so we need to register the subclasses 
explicitly.\n json.register_encoder(c, c.to_json_compatible_object)\n\n return c\n\nclass AccountElsewhere(object):\n\n __metaclass__ = _RegisterPlatformMeta\n\n platform = None # set in subclass\n\n # only fields in this set will be encoded\n json_encode_field_whitelist = set([\n 'id', 'is_locked', 'participant', 'platform', 'user_id', 'user_info',\n ])\n\n def __init__(self, db, user_id, user_info=None, existing_record=None):\n \"\"\"Either:\n - Takes a user_id and user_info, and updates the database.\n\n Or:\n - Takes a user_id and existing_record, and constructs a \"model\" object out of the record\n \"\"\"\n typecheck(user_id, (int, unicode), user_info, (None, dict))\n self.user_id = unicode(user_id)\n self.db = db\n\n if user_info is not None:\n a,b,c,d = self.upsert(user_info)\n\n self.participant = a\n self.is_claimed = b\n self.is_locked = c\n self.balance = d\n\n self.user_info = user_info\n\n # hack to make this into a weird pseudo-model that can share convenience methods\n elif existing_record is not None:\n self.participant = existing_record.participant\n self.is_claimed, self.is_locked, self.balance = self.get_misc_info(self.participant)\n self.user_info = existing_record.user_info\n self.record = existing_record\n\n def to_json_compatible_object(self):\n \"\"\"\n This is registered as an aspen.json encoder in configure-aspen\n for all subclasses of this class.\n\n It only exports fields in the whitelist.\n \"\"\"\n output = {k: v for (k,v) in self.record._asdict().items()\n if k in self.json_encode_field_whitelist}\n\n return output\n\n def set_is_locked(self, is_locked):\n self.db.run(\"\"\"\n\n UPDATE elsewhere\n SET is_locked=%s\n WHERE platform=%s AND user_id=%s\n\n \"\"\", (is_locked, self.platform, self.user_id))\n\n\n def opt_in(self, desired_username):\n \"\"\"Given a desired username, return a User object.\n \"\"\"\n from gittip.security.user import User\n\n self.set_is_locked(False)\n user = User.from_username(self.participant)\n user.sign_in()\n assert not user.ANON, self.participant # sanity check\n if self.is_claimed:\n newly_claimed = False\n else:\n newly_claimed = True\n user.participant.set_as_claimed()\n try:\n user.participant.change_username(desired_username)\n except ProblemChangingUsername:\n pass\n return user, newly_claimed\n\n\n def upsert(self, user_info):\n \"\"\"Given a dict, return a tuple.\n\n User_id is an immutable unique identifier for the given user on the\n given platform. Username is the user's login/username on the given\n platform. It is only used here for logging. Specifically, we don't\n reserve their username for them on Gittip if they're new here. We give\n them a random username here, and they'll have a chance to change it\n if/when they opt in. User_id and username may or may not be the same.\n User_info is a dictionary of profile info per the named platform. 
All\n platform dicts must have an id key that corresponds to the primary key\n in the underlying table in our own db.\n\n The return value is a tuple: (username [unicode], is_claimed [boolean],\n is_locked [boolean], balance [Decimal]).\n\n \"\"\"\n typecheck(user_info, dict)\n\n\n # Insert the account if needed.\n # =============================\n # Do this with a transaction so that if the insert fails, the\n # participant we reserved for them is rolled back as well.\n\n try:\n with self.db.get_cursor() as cursor:\n _username = reserve_a_random_username(cursor)\n cursor.execute( \"INSERT INTO elsewhere \"\n \"(platform, user_id, participant) \"\n \"VALUES (%s, %s, %s)\"\n , (self.platform, self.user_id, _username)\n )\n except IntegrityError:\n pass\n\n\n # Update their user_info.\n # =======================\n # Cast everything to unicode, because (I believe) hstore can take any\n # type of value, but psycopg2 can't.\n #\n # https://postgres.heroku.com/blog/past/2012/3/14/introducing_keyvalue_data_storage_in_heroku_postgres/\n # http://initd.org/psycopg/docs/extras.html#hstore-data-type\n #\n # XXX This clobbers things, of course, such as booleans. See\n # /on/bitbucket/%username/index.html\n\n for k, v in user_info.items():\n user_info[k] = unicode(v)\n\n\n username = self.db.one(\"\"\"\n\n UPDATE elsewhere\n SET user_info=%s\n WHERE platform=%s AND user_id=%s\n RETURNING participant\n\n \"\"\", (user_info, self.platform, self.user_id))\n\n return (username,) + self.get_misc_info(username)\n\n def get_misc_info(self, username):\n rec = self.db.one(\"\"\"\n\n SELECT claimed_time, balance, is_locked\n FROM participants\n JOIN elsewhere\n ON participants.username=participant\n WHERE platform=%s\n AND participants.username=%s\n\n \"\"\", (self.platform, username))\n\n assert rec is not None # sanity check\n\n return ( rec.claimed_time is not None\n , rec.is_locked\n , rec.balance\n )\n\n def set_oauth_tokens(self, access_token, refresh_token, expires):\n \"\"\"\n Updates the elsewhere row with the given access token, refresh token, and Python datetime\n \"\"\"\n\n self.db.run(\"\"\"\n UPDATE elsewhere \n SET (access_token, refresh_token, expires) \n = (%s, %s, %s) \n WHERE platform=%s AND user_id=%s\n \"\"\", (access_token, refresh_token, expires, self.platform, self.user_id))\n", "path": "gittip/elsewhere/__init__.py"}], "after_files": [{"content": "\"\"\"This subpackage contains functionality for working with accounts elsewhere.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\nfrom collections import OrderedDict\n\nfrom aspen.utils import typecheck\nfrom aspen import json\nfrom psycopg2 import IntegrityError\n\nimport gittip\nfrom gittip.exceptions import ProblemChangingUsername, UnknownPlatform\nfrom gittip.utils.username import reserve_a_random_username\n\n\nACTIONS = [u'opt-in', u'connect', u'lock', u'unlock']\n\n\n# to add a new elsewhere/platform:\n# 1) add its name (also the name of its module) to this list.\n# it's best to append it; this ordering is used in templates.\n# 2) inherit from AccountElsewhere in the platform class\n#\n# platform_modules will populate the platform class automatically in configure-aspen.\nplatforms_ordered = (\n 'twitter',\n 'github',\n 'bitbucket',\n 'bountysource',\n 'venmo',\n 'openstreetmap'\n)\n\n# init-time key setup ensures the future ordering of platform_classes will match\n# platforms_ordered, since overwriting entries will maintain their order.\nplatform_classes = OrderedDict([(platform, None) for platform in 
platforms_ordered])\n\n\nclass _RegisterPlatformMeta(type):\n \"\"\"Tied to AccountElsewhere to enable registration by the platform field.\n \"\"\"\n\n def __new__(cls, name, bases, dct):\n c = super(_RegisterPlatformMeta, cls).__new__(cls, name, bases, dct)\n\n # * register the platform\n # * verify it was added at init-time\n # * register the subclass's json encoder with aspen\n c_platform = getattr(c, 'platform')\n if name == 'AccountElsewhere':\n pass\n elif c_platform not in platform_classes:\n raise UnknownPlatform(c_platform) # has it been added to platform_classes init?\n else:\n platform_classes[c_platform] = c\n\n # aspen's json encoder registry does not take class hierarchies into account,\n # so we need to register the subclasses explicitly.\n json.register_encoder(c, c.to_json_compatible_object)\n\n return c\n\nclass AccountElsewhere(object):\n\n __metaclass__ = _RegisterPlatformMeta\n\n platform = None # set in subclass\n\n # only fields in this set will be encoded\n json_encode_field_whitelist = set([\n 'id', 'is_locked', 'participant', 'platform', 'user_id', 'user_info',\n ])\n\n def __init__(self, db, user_id, user_info=None, existing_record=None):\n \"\"\"Either:\n - Takes a user_id and user_info, and updates the database.\n\n Or:\n - Takes a user_id and existing_record, and constructs a \"model\" object out of the record\n \"\"\"\n typecheck(user_id, (int, unicode, long), user_info, (None, dict))\n self.user_id = unicode(user_id)\n self.db = db\n\n if user_info is not None:\n a,b,c,d = self.upsert(user_info)\n\n self.participant = a\n self.is_claimed = b\n self.is_locked = c\n self.balance = d\n\n self.user_info = user_info\n\n # hack to make this into a weird pseudo-model that can share convenience methods\n elif existing_record is not None:\n self.participant = existing_record.participant\n self.is_claimed, self.is_locked, self.balance = self.get_misc_info(self.participant)\n self.user_info = existing_record.user_info\n self.record = existing_record\n\n def to_json_compatible_object(self):\n \"\"\"\n This is registered as an aspen.json encoder in configure-aspen\n for all subclasses of this class.\n\n It only exports fields in the whitelist.\n \"\"\"\n output = {k: v for (k,v) in self.record._asdict().items()\n if k in self.json_encode_field_whitelist}\n\n return output\n\n def set_is_locked(self, is_locked):\n self.db.run(\"\"\"\n\n UPDATE elsewhere\n SET is_locked=%s\n WHERE platform=%s AND user_id=%s\n\n \"\"\", (is_locked, self.platform, self.user_id))\n\n\n def opt_in(self, desired_username):\n \"\"\"Given a desired username, return a User object.\n \"\"\"\n from gittip.security.user import User\n\n self.set_is_locked(False)\n user = User.from_username(self.participant)\n user.sign_in()\n assert not user.ANON, self.participant # sanity check\n if self.is_claimed:\n newly_claimed = False\n else:\n newly_claimed = True\n user.participant.set_as_claimed()\n try:\n user.participant.change_username(desired_username)\n except ProblemChangingUsername:\n pass\n return user, newly_claimed\n\n\n def upsert(self, user_info):\n \"\"\"Given a dict, return a tuple.\n\n User_id is an immutable unique identifier for the given user on the\n given platform. Username is the user's login/username on the given\n platform. It is only used here for logging. Specifically, we don't\n reserve their username for them on Gittip if they're new here. We give\n them a random username here, and they'll have a chance to change it\n if/when they opt in. 
User_id and username may or may not be the same.\n User_info is a dictionary of profile info per the named platform. All\n platform dicts must have an id key that corresponds to the primary key\n in the underlying table in our own db.\n\n The return value is a tuple: (username [unicode], is_claimed [boolean],\n is_locked [boolean], balance [Decimal]).\n\n \"\"\"\n typecheck(user_info, dict)\n\n\n # Insert the account if needed.\n # =============================\n # Do this with a transaction so that if the insert fails, the\n # participant we reserved for them is rolled back as well.\n\n try:\n with self.db.get_cursor() as cursor:\n _username = reserve_a_random_username(cursor)\n cursor.execute( \"INSERT INTO elsewhere \"\n \"(platform, user_id, participant) \"\n \"VALUES (%s, %s, %s)\"\n , (self.platform, self.user_id, _username)\n )\n except IntegrityError:\n pass\n\n\n # Update their user_info.\n # =======================\n # Cast everything to unicode, because (I believe) hstore can take any\n # type of value, but psycopg2 can't.\n #\n # https://postgres.heroku.com/blog/past/2012/3/14/introducing_keyvalue_data_storage_in_heroku_postgres/\n # http://initd.org/psycopg/docs/extras.html#hstore-data-type\n #\n # XXX This clobbers things, of course, such as booleans. See\n # /on/bitbucket/%username/index.html\n\n for k, v in user_info.items():\n user_info[k] = unicode(v)\n\n\n username = self.db.one(\"\"\"\n\n UPDATE elsewhere\n SET user_info=%s\n WHERE platform=%s AND user_id=%s\n RETURNING participant\n\n \"\"\", (user_info, self.platform, self.user_id))\n\n return (username,) + self.get_misc_info(username)\n\n def get_misc_info(self, username):\n rec = self.db.one(\"\"\"\n\n SELECT claimed_time, balance, is_locked\n FROM participants\n JOIN elsewhere\n ON participants.username=participant\n WHERE platform=%s\n AND participants.username=%s\n\n \"\"\", (self.platform, username))\n\n assert rec is not None # sanity check\n\n return ( rec.claimed_time is not None\n , rec.is_locked\n , rec.balance\n )\n\n def set_oauth_tokens(self, access_token, refresh_token, expires):\n \"\"\"\n Updates the elsewhere row with the given access token, refresh token, and Python datetime\n \"\"\"\n\n self.db.run(\"\"\"\n UPDATE elsewhere \n SET (access_token, refresh_token, expires) \n = (%s, %s, %s) \n WHERE platform=%s AND user_id=%s\n \"\"\", (access_token, refresh_token, expires, self.platform, self.user_id))\n", "path": "gittip/elsewhere/__init__.py"}]} | 3,005 | 148 |
gh_patches_debug_22614 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-269 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E2502 is mistaken about IamInstanceProfile
*cfn-lint version: 0.4.2*
*Description of issue.*
Linting a template returned:
```
E2502 Property IamInstanceProfile shouldn't be an ARN for Resources/BuildkiteSpotfleet/Properties/SpotFleetRequestConfigData/LaunchSpecifications/0/IamInstanceProfile/Arn/Fn::GetAtt
```
However, that property can be an ARN according to https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-spotfleet-spotfleetrequestconfigdata-launchspecifications.html#cfn-ec2-spotfleet-spotfleetrequestconfigdata-launchspecifications-iaminstanceprofile
It can be an `{"Arn": "profile_arn"}` structure.
--- END ISSUE ---
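As a rough sketch (simplified, not the actual cfn-lint implementation), the rule needs to treat `AWS::EC2::SpotFleet` resources as a special case: for them a `Fn::GetAtt ... Arn` is the expected form, while other resource types expect the profile name:

```python
# Simplified sketch of the desired E2502 behaviour; not the real cfn-lint code.

def iam_instance_profile_finding(resource_type, getatt_attribute):
    """Return a finding message, or None when the usage is acceptable."""
    if resource_type == "AWS::EC2::SpotFleet":
        # SpotFleet launch specifications take {"Arn": ...}, so an ARN is correct here.
        if getatt_attribute != "Arn":
            return "Property IamInstanceProfile should be an ARN"
        return None
    # Other resources expect the profile name, not the ARN.
    if getatt_attribute == "Arn":
        return "Property IamInstanceProfile shouldn't be an ARN"
    return None

print(iam_instance_profile_finding("AWS::EC2::SpotFleet", "Arn"))  # None (no finding)
print(iam_instance_profile_finding("AWS::EC2::Instance", "Arn"))   # a finding
```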
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/iam/InstanceProfile.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19
20
21 class InstanceProfile(CloudFormationLintRule):
22 """Check if IamInstanceProfile are used"""
23 id = 'E2502'
24 shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'
25 description = 'See if there are any properties IamInstanceProfile' + \
26 'are using name and not ARN'
27 source_url = 'https://github.com/awslabs/cfn-python-lint'
28 tags = ['properties']
29
30 def match(self, cfn):
31 """Check CloudFormation IamInstanceProfile Parameters"""
32
33 matches = list()
34
35 # Build the list of keys
36 trees = cfn.search_deep_keys('Fn::GetAtt')
37 # Filter only resoureces
38 # Disable pylint for Pylint 2
39 # pylint: disable=W0110
40 trees = filter(lambda x: x[0] == 'Resources', trees)
41 for tree in trees:
42 if any(e == 'IamInstanceProfile' for e in tree):
43 obj = tree[-1]
44 objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')
45 if objtype:
46 if objtype != 'AWS::IAM::InstanceProfile':
47 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
48 '/'.join(map(str, tree[:-1])))
49 matches.append(RuleMatch(tree[:-1], message))
50 else:
51 if obj[1] == 'Arn':
52 message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % (
53 '/'.join(map(str, tree[:-1])))
54 matches.append(RuleMatch(tree[:-1], message))
55
56 # Search Refs
57 trees = cfn.search_deep_keys('Ref')
58 # Filter only resoureces
59 trees = filter(lambda x: x[0] == 'Resources', trees)
60 for tree in trees:
61 if any(e == 'IamInstanceProfile' for e in tree):
62 obj = tree[-1]
63 objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')
64 if objtype:
65 if objtype != 'AWS::IAM::InstanceProfile':
66 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
67 '/'.join(map(str, tree[:-1])))
68 matches.append(RuleMatch(tree[:-1], message))
69
70 return matches
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py
--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py
+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py
@@ -48,10 +48,16 @@
'/'.join(map(str, tree[:-1])))
matches.append(RuleMatch(tree[:-1], message))
else:
- if obj[1] == 'Arn':
- message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % (
- '/'.join(map(str, tree[:-1])))
- matches.append(RuleMatch(tree[:-1], message))
+ if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
+ if obj[1] != 'Arn':
+ message = 'Property IamInstanceProfile should be an ARN for %s' % (
+ '/'.join(map(str, tree[:-1])))
+ matches.append(RuleMatch(tree[:-1], message))
+ else:
+ if obj[1] == 'Arn':
+ message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % (
+ '/'.join(map(str, tree[:-1])))
+ matches.append(RuleMatch(tree[:-1], message))
# Search Refs
trees = cfn.search_deep_keys('Ref')
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py\n+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n@@ -48,10 +48,16 @@\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n- if obj[1] == 'Arn':\n- message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n- '/'.join(map(str, tree[:-1])))\n- matches.append(RuleMatch(tree[:-1], message))\n+ if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n+ if obj[1] != 'Arn':\n+ message = 'Property IamInstanceProfile should be an ARN for %s' % (\n+ '/'.join(map(str, tree[:-1])))\n+ matches.append(RuleMatch(tree[:-1], message))\n+ else:\n+ if obj[1] == 'Arn':\n+ message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n+ '/'.join(map(str, tree[:-1])))\n+ matches.append(RuleMatch(tree[:-1], message))\n \n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n", "issue": "E2502 is mistaken about IamInstanceProfile\n*cfn-lint version: 0.4.2*\r\n\r\n*Description of issue.*\r\n\r\nLinting a template returned:\r\n```\r\nE2502 Property IamInstanceProfile shouldn't be an ARN for Resources/BuildkiteSpotfleet/Properties/SpotFleetRequestConfigData/LaunchSpecifications/0/IamInstanceProfile/Arn/Fn::GetAtt\r\n```\r\n\r\nHowever that property can be an ARN according to https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-spotfleet-spotfleetrequestconfigdata-launchspecifications.html#cfn-ec2-spotfleet-spotfleetrequestconfigdata-launchspecifications-iaminstanceprofile\r\n\r\nIt can be an `{\"Arn\": \"profile_arn\"}` structure.\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass InstanceProfile(CloudFormationLintRule):\n \"\"\"Check if IamInstanceProfile are used\"\"\"\n id = 'E2502'\n shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'\n description = 'See if there are any properties IamInstanceProfile' + \\\n 'are using name and not ARN'\n source_url = 'https://github.com/awslabs/cfn-python-lint'\n tags = ['properties']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation IamInstanceProfile Parameters\"\"\"\n\n matches = list()\n\n # Build the list of keys\n trees = cfn.search_deep_keys('Fn::GetAtt')\n # Filter only resoureces\n # Disable pylint for Pylint 2\n # pylint: disable=W0110\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if obj[1] == 'Arn':\n message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n # Filter only resoureces\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/InstanceProfile.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass InstanceProfile(CloudFormationLintRule):\n \"\"\"Check if IamInstanceProfile are used\"\"\"\n id = 'E2502'\n shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'\n description = 'See if there are any properties IamInstanceProfile' + \\\n 'are using name and not ARN'\n source_url = 'https://github.com/awslabs/cfn-python-lint'\n tags = ['properties']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation IamInstanceProfile Parameters\"\"\"\n\n matches = list()\n\n # Build the list of keys\n trees = cfn.search_deep_keys('Fn::GetAtt')\n # Filter only resoureces\n # Disable pylint for Pylint 2\n # pylint: disable=W0110\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if obj[1] == 'Arn':\n message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n # Filter only resoureces\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/InstanceProfile.py"}]} | 1,328 | 329 |
gh_patches_debug_28095 | rasdani/github-patches | git_diff | Qiskit__qiskit-8714 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document naming of CircuitInstruction and related classes
### What should we add?
I asked @jakelishman why CircuitInstruction is named that way when it is not an Instruction, and why it has an attribute named `operation` which is actually an Instruction. He replied:
>it’s because what’s currently called Instruction shouldn’t really be called that, but we can’t change it without breaking everything. So instead, the new interface definition for “something that can be added to QuantumCircuit” is called Operation, and the container is CircuitInstruction to avoid the naming clash.
This should be publicly documented somewhere to mitigate user confusion.
--- END ISSUE ---
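As a small illustration of how the names currently fit together (this assumes a Terra release recent enough that `QuantumCircuit.data` yields `CircuitInstruction` objects and that `Operation` is importable from `qiskit.circuit`):

```python
from qiskit.circuit import QuantumCircuit, Instruction, Operation

qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)

item = qc.data[0]                               # the container: a CircuitInstruction
print(type(item).__name__)                      # CircuitInstruction
print(isinstance(item.operation, Instruction))  # True: the "operation" is an Instruction
print(isinstance(item.operation, Operation))    # True: Instruction implements Operation
print(item.qubits, item.clbits)                 # the operands live on the container
```

The object returned by `item.operation` here is an `HGate`, which is a `Gate`, which is an `Instruction`; only the container is a `CircuitInstruction`.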
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/circuit/quantumcircuitdata.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2019.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 """A wrapper class for the purposes of validating modifications to
14 QuantumCircuit.data while maintaining the interface of a python list."""
15
16 from collections.abc import MutableSequence
17 from typing import Tuple, Iterable, Optional
18
19 from .exceptions import CircuitError
20 from .instruction import Instruction
21 from .quantumregister import Qubit
22 from .classicalregister import Clbit
23
24
25 class CircuitInstruction:
26 """A single instruction in a :class:`.QuantumCircuit`, comprised of the :attr:`operation` and
27 various operands.
28
29 .. warning::
30
31 This is a lightweight internal class and there is minimal error checking; you must respect
32 the type hints when using it. It is the user's responsibility to ensure that direct
33 mutations of the object do not invalidate the types, nor the restrictions placed on it by
34 its context. Typically this will mean, for example, that :attr:`qubits` must be a sequence
35 of distinct items, with no duplicates.
36 """
37
38 __slots__ = ("operation", "qubits", "clbits", "_legacy_format_cache")
39
40 operation: Instruction
41 """The logical operation that this instruction represents an execution of."""
42 qubits: Tuple[Qubit, ...]
43 """A sequence of the qubits that the operation is applied to."""
44 clbits: Tuple[Clbit, ...]
45 """A sequence of the classical bits that this operation reads from or writes to."""
46
47 def __init__(
48 self,
49 operation: Instruction,
50 qubits: Iterable[Qubit] = (),
51 clbits: Iterable[Clbit] = (),
52 ):
53 self.operation = operation
54 self.qubits = tuple(qubits)
55 self.clbits = tuple(clbits)
56 self._legacy_format_cache = None
57
58 def copy(self) -> "CircuitInstruction":
59 """Return a shallow copy of the :class:`CircuitInstruction`."""
60 return self.__class__(
61 operation=self.operation,
62 qubits=self.qubits,
63 clbits=self.clbits,
64 )
65
66 def replace(
67 self,
68 operation: Optional[Instruction] = None,
69 qubits: Optional[Iterable[Qubit]] = None,
70 clbits: Optional[Iterable[Clbit]] = None,
71 ) -> "CircuitInstruction":
72 """Return a new :class:`CircuitInstruction` with the given fields replaced."""
73 return self.__class__(
74 operation=self.operation if operation is None else operation,
75 qubits=self.qubits if qubits is None else qubits,
76 clbits=self.clbits if clbits is None else clbits,
77 )
78
79 def __repr__(self):
80 return (
81 f"{type(self).__name__}("
82 f"operation={self.operation!r}"
83 f", qubits={self.qubits!r}"
84 f", clbits={self.clbits!r}"
85 ")"
86 )
87
88 def __eq__(self, other):
89 if isinstance(other, type(self)):
90 # Ordered from fastest comparisons to slowest.
91 return (
92 self.clbits == other.clbits
93 and self.qubits == other.qubits
94 and self.operation == other.operation
95 )
96 if isinstance(other, tuple):
97 return self._legacy_format == other
98 return NotImplemented
99
100 # Legacy tuple-like interface support.
101 #
102 # For a best attempt at API compatibility during the transition to using this new class, we need
103 # the interface to behave exactly like the old 3-tuple `(inst, qargs, cargs)` if it's treated
104 # like that via unpacking or similar. That means that the `parameters` field is completely
105 # absent, and the qubits and clbits must be converted to lists.
106
107 @property
108 def _legacy_format(self):
109 if self._legacy_format_cache is None:
110 # The qubits and clbits were generally stored as lists in the old format, and various
111 # places assume that they will certainly be lists.
112 self._legacy_format_cache = (self.operation, list(self.qubits), list(self.clbits))
113 return self._legacy_format_cache
114
115 def __getitem__(self, key):
116 return self._legacy_format[key]
117
118 def __iter__(self):
119 return iter(self._legacy_format)
120
121 def __len__(self):
122 return 3
123
124
125 class QuantumCircuitData(MutableSequence):
126 """A wrapper class for the purposes of validating modifications to
127 QuantumCircuit.data while maintaining the interface of a python list."""
128
129 def __init__(self, circuit):
130 self._circuit = circuit
131
132 def __getitem__(self, i):
133 return self._circuit._data[i]
134
135 def __setitem__(self, key, value):
136 # For now (Terra 0.21), the `QuantumCircuit.data` setter is meant to perform validation, so
137 # we do the same qubit checks that `QuantumCircuit.append` would do.
138 if isinstance(value, CircuitInstruction):
139 operation, qargs, cargs = value.operation, value.qubits, value.clbits
140 else:
141 # Handle the legacy 3-tuple format.
142 operation, qargs, cargs = value
143 value = self._resolve_legacy_value(operation, qargs, cargs)
144 self._circuit._data[key] = value
145 self._circuit._update_parameter_table(value)
146
147 def _resolve_legacy_value(self, operation, qargs, cargs) -> CircuitInstruction:
148 """Resolve the old-style 3-tuple into the new :class:`CircuitInstruction` type."""
149 if not isinstance(operation, Instruction) and hasattr(operation, "to_instruction"):
150 operation = operation.to_instruction()
151 if not isinstance(operation, Instruction):
152 raise CircuitError("object is not an Instruction.")
153
154 expanded_qargs = [self._circuit.qbit_argument_conversion(qarg) for qarg in qargs or []]
155 expanded_cargs = [self._circuit.cbit_argument_conversion(carg) for carg in cargs or []]
156
157 if isinstance(operation, Instruction):
158 broadcast_args = list(operation.broadcast_arguments(expanded_qargs, expanded_cargs))
159 else:
160 broadcast_args = list(
161 Instruction.broadcast_arguments(operation, expanded_qargs, expanded_cargs)
162 )
163
164 if len(broadcast_args) > 1:
165 raise CircuitError(
166 "QuantumCircuit.data modification does not support argument broadcasting."
167 )
168
169 qargs, cargs = broadcast_args[0]
170
171 self._circuit._check_dups(qargs)
172 return CircuitInstruction(operation, tuple(qargs), tuple(cargs))
173
174 def insert(self, index, value):
175 self._circuit._data.insert(index, None)
176 try:
177 self[index] = value
178 except CircuitError:
179 del self._circuit._data[index]
180 raise
181
182 def __iter__(self):
183 return iter(self._circuit._data)
184
185 def __delitem__(self, i):
186 del self._circuit._data[i]
187
188 def __len__(self):
189 return len(self._circuit._data)
190
191 def __cast(self, other):
192 return other._circuit._data if isinstance(other, QuantumCircuitData) else other
193
194 def __repr__(self):
195 return repr(self._circuit._data)
196
197 def __lt__(self, other):
198 return self._circuit._data < self.__cast(other)
199
200 def __le__(self, other):
201 return self._circuit._data <= self.__cast(other)
202
203 def __eq__(self, other):
204 return self._circuit._data == self.__cast(other)
205
206 def __gt__(self, other):
207 return self._circuit._data > self.__cast(other)
208
209 def __ge__(self, other):
210 return self._circuit._data >= self.__cast(other)
211
212 def __add__(self, other):
213 return self._circuit._data + self.__cast(other)
214
215 def __radd__(self, other):
216 return self.__cast(other) + self._circuit._data
217
218 def __mul__(self, n):
219 return self._circuit._data * n
220
221 def __rmul__(self, n):
222 return n * self._circuit._data
223
224 def sort(self, *args, **kwargs):
225 """In-place stable sort. Accepts arguments of list.sort."""
226 self._circuit._data.sort(*args, **kwargs)
227
228 def copy(self):
229 """Returns a shallow copy of instruction list."""
230 return self._circuit._data.copy()
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/circuit/quantumcircuitdata.py b/qiskit/circuit/quantumcircuitdata.py
--- a/qiskit/circuit/quantumcircuitdata.py
+++ b/qiskit/circuit/quantumcircuitdata.py
@@ -26,6 +26,28 @@
"""A single instruction in a :class:`.QuantumCircuit`, comprised of the :attr:`operation` and
various operands.
+ .. note::
+
+ There is some possible confusion in the names of this class, :class:`~.circuit.Instruction`,
+ and :class:`~.circuit.Operation`, and this class's attribute :attr:`operation`. Our
+ preferred terminology is by analogy to assembly languages, where an "instruction" is made up
+ of an "operation" and its "operands".
+
+ Historically, :class:`~.circuit.Instruction` came first, and originally contained the qubits
+ it operated on and any parameters, so it was a true "instruction". Over time,
+ :class:`.QuantumCircuit` became responsible for tracking qubits and clbits, and the class
+ became better described as an "operation". Changing the name of such a core object would be
+ a very unpleasant API break for users, and so we have stuck with it.
+
+ This class was created to provide a formal "instruction" context object in
+ :class:`.QuantumCircuit.data`, which had long been made of ad-hoc tuples. With this, and
+ the advent of the :class:`~.circuit.Operation` interface for adding more complex objects to
+ circuits, we took the opportunity to correct the historical naming. For the time being,
+ this leads to an awkward case where :attr:`.CircuitInstruction.operation` is often an
+ :class:`~.circuit.Instruction` instance (:class:`~.circuit.Instruction` implements the
+ :class:`.Operation` interface), but as the :class:`.Operation` interface gains more use,
+ this confusion will hopefully abate.
+
.. warning::
This is a lightweight internal class and there is minimal error checking; you must respect
| {"golden_diff": "diff --git a/qiskit/circuit/quantumcircuitdata.py b/qiskit/circuit/quantumcircuitdata.py\n--- a/qiskit/circuit/quantumcircuitdata.py\n+++ b/qiskit/circuit/quantumcircuitdata.py\n@@ -26,6 +26,28 @@\n \"\"\"A single instruction in a :class:`.QuantumCircuit`, comprised of the :attr:`operation` and\n various operands.\n \n+ .. note::\n+\n+ There is some possible confusion in the names of this class, :class:`~.circuit.Instruction`,\n+ and :class:`~.circuit.Operation`, and this class's attribute :attr:`operation`. Our\n+ preferred terminology is by analogy to assembly languages, where an \"instruction\" is made up\n+ of an \"operation\" and its \"operands\".\n+\n+ Historically, :class:`~.circuit.Instruction` came first, and originally contained the qubits\n+ it operated on and any parameters, so it was a true \"instruction\". Over time,\n+ :class:`.QuantumCircuit` became responsible for tracking qubits and clbits, and the class\n+ became better described as an \"operation\". Changing the name of such a core object would be\n+ a very unpleasant API break for users, and so we have stuck with it.\n+\n+ This class was created to provide a formal \"instruction\" context object in\n+ :class:`.QuantumCircuit.data`, which had long been made of ad-hoc tuples. With this, and\n+ the advent of the :class:`~.circuit.Operation` interface for adding more complex objects to\n+ circuits, we took the opportunity to correct the historical naming. For the time being,\n+ this leads to an awkward case where :attr:`.CircuitInstruction.operation` is often an\n+ :class:`~.circuit.Instruction` instance (:class:`~.circuit.Instruction` implements the\n+ :class:`.Operation` interface), but as the :class:`.Operation` interface gains more use,\n+ this confusion will hopefully abate.\n+\n .. warning::\n \n This is a lightweight internal class and there is minimal error checking; you must respect\n", "issue": "Document naming of CircuitInstruction and related classes\n### What should we add?\n\nI asked @jakelishman why CircuitInstruction is named that way when it is not an Instruction, and why it has an attribute named `operation` which is actually an Instruction. He replied:\r\n>it\u2019s because what\u2019s currently called Instruction shouldn\u2019t really be called that, but we can\u2019t change it without breaking everything. So instead, the new interface definition for \u201csomething that can be added to QuantumCircuit is called Operation, and the container is CircuitInstruction to avoid the naming clash\r\n\r\nThis should be publicly documented somewhere to mitigate user confusion.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"A wrapper class for the purposes of validating modifications to\nQuantumCircuit.data while maintaining the interface of a python list.\"\"\"\n\nfrom collections.abc import MutableSequence\nfrom typing import Tuple, Iterable, Optional\n\nfrom .exceptions import CircuitError\nfrom .instruction import Instruction\nfrom .quantumregister import Qubit\nfrom .classicalregister import Clbit\n\n\nclass CircuitInstruction:\n \"\"\"A single instruction in a :class:`.QuantumCircuit`, comprised of the :attr:`operation` and\n various operands.\n\n .. warning::\n\n This is a lightweight internal class and there is minimal error checking; you must respect\n the type hints when using it. It is the user's responsibility to ensure that direct\n mutations of the object do not invalidate the types, nor the restrictions placed on it by\n its context. Typically this will mean, for example, that :attr:`qubits` must be a sequence\n of distinct items, with no duplicates.\n \"\"\"\n\n __slots__ = (\"operation\", \"qubits\", \"clbits\", \"_legacy_format_cache\")\n\n operation: Instruction\n \"\"\"The logical operation that this instruction represents an execution of.\"\"\"\n qubits: Tuple[Qubit, ...]\n \"\"\"A sequence of the qubits that the operation is applied to.\"\"\"\n clbits: Tuple[Clbit, ...]\n \"\"\"A sequence of the classical bits that this operation reads from or writes to.\"\"\"\n\n def __init__(\n self,\n operation: Instruction,\n qubits: Iterable[Qubit] = (),\n clbits: Iterable[Clbit] = (),\n ):\n self.operation = operation\n self.qubits = tuple(qubits)\n self.clbits = tuple(clbits)\n self._legacy_format_cache = None\n\n def copy(self) -> \"CircuitInstruction\":\n \"\"\"Return a shallow copy of the :class:`CircuitInstruction`.\"\"\"\n return self.__class__(\n operation=self.operation,\n qubits=self.qubits,\n clbits=self.clbits,\n )\n\n def replace(\n self,\n operation: Optional[Instruction] = None,\n qubits: Optional[Iterable[Qubit]] = None,\n clbits: Optional[Iterable[Clbit]] = None,\n ) -> \"CircuitInstruction\":\n \"\"\"Return a new :class:`CircuitInstruction` with the given fields replaced.\"\"\"\n return self.__class__(\n operation=self.operation if operation is None else operation,\n qubits=self.qubits if qubits is None else qubits,\n clbits=self.clbits if clbits is None else clbits,\n )\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}(\"\n f\"operation={self.operation!r}\"\n f\", qubits={self.qubits!r}\"\n f\", clbits={self.clbits!r}\"\n \")\"\n )\n\n def __eq__(self, other):\n if isinstance(other, type(self)):\n # Ordered from fastest comparisons to slowest.\n return (\n self.clbits == other.clbits\n and self.qubits == other.qubits\n and self.operation == other.operation\n )\n if isinstance(other, tuple):\n return self._legacy_format == other\n return NotImplemented\n\n # Legacy tuple-like interface support.\n #\n # For a best attempt at API compatibility during the transition to using this new class, we need\n # the interface to behave exactly like the old 3-tuple `(inst, qargs, cargs)` if it's treated\n # like that via unpacking or similar. 
That means that the `parameters` field is completely\n # absent, and the qubits and clbits must be converted to lists.\n\n @property\n def _legacy_format(self):\n if self._legacy_format_cache is None:\n # The qubits and clbits were generally stored as lists in the old format, and various\n # places assume that they will certainly be lists.\n self._legacy_format_cache = (self.operation, list(self.qubits), list(self.clbits))\n return self._legacy_format_cache\n\n def __getitem__(self, key):\n return self._legacy_format[key]\n\n def __iter__(self):\n return iter(self._legacy_format)\n\n def __len__(self):\n return 3\n\n\nclass QuantumCircuitData(MutableSequence):\n \"\"\"A wrapper class for the purposes of validating modifications to\n QuantumCircuit.data while maintaining the interface of a python list.\"\"\"\n\n def __init__(self, circuit):\n self._circuit = circuit\n\n def __getitem__(self, i):\n return self._circuit._data[i]\n\n def __setitem__(self, key, value):\n # For now (Terra 0.21), the `QuantumCircuit.data` setter is meant to perform validation, so\n # we do the same qubit checks that `QuantumCircuit.append` would do.\n if isinstance(value, CircuitInstruction):\n operation, qargs, cargs = value.operation, value.qubits, value.clbits\n else:\n # Handle the legacy 3-tuple format.\n operation, qargs, cargs = value\n value = self._resolve_legacy_value(operation, qargs, cargs)\n self._circuit._data[key] = value\n self._circuit._update_parameter_table(value)\n\n def _resolve_legacy_value(self, operation, qargs, cargs) -> CircuitInstruction:\n \"\"\"Resolve the old-style 3-tuple into the new :class:`CircuitInstruction` type.\"\"\"\n if not isinstance(operation, Instruction) and hasattr(operation, \"to_instruction\"):\n operation = operation.to_instruction()\n if not isinstance(operation, Instruction):\n raise CircuitError(\"object is not an Instruction.\")\n\n expanded_qargs = [self._circuit.qbit_argument_conversion(qarg) for qarg in qargs or []]\n expanded_cargs = [self._circuit.cbit_argument_conversion(carg) for carg in cargs or []]\n\n if isinstance(operation, Instruction):\n broadcast_args = list(operation.broadcast_arguments(expanded_qargs, expanded_cargs))\n else:\n broadcast_args = list(\n Instruction.broadcast_arguments(operation, expanded_qargs, expanded_cargs)\n )\n\n if len(broadcast_args) > 1:\n raise CircuitError(\n \"QuantumCircuit.data modification does not support argument broadcasting.\"\n )\n\n qargs, cargs = broadcast_args[0]\n\n self._circuit._check_dups(qargs)\n return CircuitInstruction(operation, tuple(qargs), tuple(cargs))\n\n def insert(self, index, value):\n self._circuit._data.insert(index, None)\n try:\n self[index] = value\n except CircuitError:\n del self._circuit._data[index]\n raise\n\n def __iter__(self):\n return iter(self._circuit._data)\n\n def __delitem__(self, i):\n del self._circuit._data[i]\n\n def __len__(self):\n return len(self._circuit._data)\n\n def __cast(self, other):\n return other._circuit._data if isinstance(other, QuantumCircuitData) else other\n\n def __repr__(self):\n return repr(self._circuit._data)\n\n def __lt__(self, other):\n return self._circuit._data < self.__cast(other)\n\n def __le__(self, other):\n return self._circuit._data <= self.__cast(other)\n\n def __eq__(self, other):\n return self._circuit._data == self.__cast(other)\n\n def __gt__(self, other):\n return self._circuit._data > self.__cast(other)\n\n def __ge__(self, other):\n return self._circuit._data >= self.__cast(other)\n\n def __add__(self, other):\n return 
self._circuit._data + self.__cast(other)\n\n def __radd__(self, other):\n return self.__cast(other) + self._circuit._data\n\n def __mul__(self, n):\n return self._circuit._data * n\n\n def __rmul__(self, n):\n return n * self._circuit._data\n\n def sort(self, *args, **kwargs):\n \"\"\"In-place stable sort. Accepts arguments of list.sort.\"\"\"\n self._circuit._data.sort(*args, **kwargs)\n\n def copy(self):\n \"\"\"Returns a shallow copy of instruction list.\"\"\"\n return self._circuit._data.copy()\n", "path": "qiskit/circuit/quantumcircuitdata.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"A wrapper class for the purposes of validating modifications to\nQuantumCircuit.data while maintaining the interface of a python list.\"\"\"\n\nfrom collections.abc import MutableSequence\nfrom typing import Tuple, Iterable, Optional\n\nfrom .exceptions import CircuitError\nfrom .instruction import Instruction\nfrom .quantumregister import Qubit\nfrom .classicalregister import Clbit\n\n\nclass CircuitInstruction:\n \"\"\"A single instruction in a :class:`.QuantumCircuit`, comprised of the :attr:`operation` and\n various operands.\n\n .. note::\n\n There is some possible confusion in the names of this class, :class:`~.circuit.Instruction`,\n and :class:`~.circuit.Operation`, and this class's attribute :attr:`operation`. Our\n preferred terminology is by analogy to assembly languages, where an \"instruction\" is made up\n of an \"operation\" and its \"operands\".\n\n Historically, :class:`~.circuit.Instruction` came first, and originally contained the qubits\n it operated on and any parameters, so it was a true \"instruction\". Over time,\n :class:`.QuantumCircuit` became responsible for tracking qubits and clbits, and the class\n became better described as an \"operation\". Changing the name of such a core object would be\n a very unpleasant API break for users, and so we have stuck with it.\n\n This class was created to provide a formal \"instruction\" context object in\n :class:`.QuantumCircuit.data`, which had long been made of ad-hoc tuples. With this, and\n the advent of the :class:`~.circuit.Operation` interface for adding more complex objects to\n circuits, we took the opportunity to correct the historical naming. For the time being,\n this leads to an awkward case where :attr:`.CircuitInstruction.operation` is often an\n :class:`~.circuit.Instruction` instance (:class:`~.circuit.Instruction` implements the\n :class:`.Operation` interface), but as the :class:`.Operation` interface gains more use,\n this confusion will hopefully abate.\n\n .. warning::\n\n This is a lightweight internal class and there is minimal error checking; you must respect\n the type hints when using it. It is the user's responsibility to ensure that direct\n mutations of the object do not invalidate the types, nor the restrictions placed on it by\n its context. 
Typically this will mean, for example, that :attr:`qubits` must be a sequence\n of distinct items, with no duplicates.\n \"\"\"\n\n __slots__ = (\"operation\", \"qubits\", \"clbits\", \"_legacy_format_cache\")\n\n operation: Instruction\n \"\"\"The logical operation that this instruction represents an execution of.\"\"\"\n qubits: Tuple[Qubit, ...]\n \"\"\"A sequence of the qubits that the operation is applied to.\"\"\"\n clbits: Tuple[Clbit, ...]\n \"\"\"A sequence of the classical bits that this operation reads from or writes to.\"\"\"\n\n def __init__(\n self,\n operation: Instruction,\n qubits: Iterable[Qubit] = (),\n clbits: Iterable[Clbit] = (),\n ):\n self.operation = operation\n self.qubits = tuple(qubits)\n self.clbits = tuple(clbits)\n self._legacy_format_cache = None\n\n def copy(self) -> \"CircuitInstruction\":\n \"\"\"Return a shallow copy of the :class:`CircuitInstruction`.\"\"\"\n return self.__class__(\n operation=self.operation,\n qubits=self.qubits,\n clbits=self.clbits,\n )\n\n def replace(\n self,\n operation: Optional[Instruction] = None,\n qubits: Optional[Iterable[Qubit]] = None,\n clbits: Optional[Iterable[Clbit]] = None,\n ) -> \"CircuitInstruction\":\n \"\"\"Return a new :class:`CircuitInstruction` with the given fields replaced.\"\"\"\n return self.__class__(\n operation=self.operation if operation is None else operation,\n qubits=self.qubits if qubits is None else qubits,\n clbits=self.clbits if clbits is None else clbits,\n )\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}(\"\n f\"operation={self.operation!r}\"\n f\", qubits={self.qubits!r}\"\n f\", clbits={self.clbits!r}\"\n \")\"\n )\n\n def __eq__(self, other):\n if isinstance(other, type(self)):\n # Ordered from fastest comparisons to slowest.\n return (\n self.clbits == other.clbits\n and self.qubits == other.qubits\n and self.operation == other.operation\n )\n if isinstance(other, tuple):\n return self._legacy_format == other\n return NotImplemented\n\n # Legacy tuple-like interface support.\n #\n # For a best attempt at API compatibility during the transition to using this new class, we need\n # the interface to behave exactly like the old 3-tuple `(inst, qargs, cargs)` if it's treated\n # like that via unpacking or similar. 
That means that the `parameters` field is completely\n # absent, and the qubits and clbits must be converted to lists.\n\n @property\n def _legacy_format(self):\n if self._legacy_format_cache is None:\n # The qubits and clbits were generally stored as lists in the old format, and various\n # places assume that they will certainly be lists.\n self._legacy_format_cache = (self.operation, list(self.qubits), list(self.clbits))\n return self._legacy_format_cache\n\n def __getitem__(self, key):\n return self._legacy_format[key]\n\n def __iter__(self):\n return iter(self._legacy_format)\n\n def __len__(self):\n return 3\n\n\nclass QuantumCircuitData(MutableSequence):\n \"\"\"A wrapper class for the purposes of validating modifications to\n QuantumCircuit.data while maintaining the interface of a python list.\"\"\"\n\n def __init__(self, circuit):\n self._circuit = circuit\n\n def __getitem__(self, i):\n return self._circuit._data[i]\n\n def __setitem__(self, key, value):\n # For now (Terra 0.21), the `QuantumCircuit.data` setter is meant to perform validation, so\n # we do the same qubit checks that `QuantumCircuit.append` would do.\n if isinstance(value, CircuitInstruction):\n operation, qargs, cargs = value.operation, value.qubits, value.clbits\n else:\n # Handle the legacy 3-tuple format.\n operation, qargs, cargs = value\n value = self._resolve_legacy_value(operation, qargs, cargs)\n self._circuit._data[key] = value\n self._circuit._update_parameter_table(value)\n\n def _resolve_legacy_value(self, operation, qargs, cargs) -> CircuitInstruction:\n \"\"\"Resolve the old-style 3-tuple into the new :class:`CircuitInstruction` type.\"\"\"\n if not isinstance(operation, Instruction) and hasattr(operation, \"to_instruction\"):\n operation = operation.to_instruction()\n if not isinstance(operation, Instruction):\n raise CircuitError(\"object is not an Instruction.\")\n\n expanded_qargs = [self._circuit.qbit_argument_conversion(qarg) for qarg in qargs or []]\n expanded_cargs = [self._circuit.cbit_argument_conversion(carg) for carg in cargs or []]\n\n if isinstance(operation, Instruction):\n broadcast_args = list(operation.broadcast_arguments(expanded_qargs, expanded_cargs))\n else:\n broadcast_args = list(\n Instruction.broadcast_arguments(operation, expanded_qargs, expanded_cargs)\n )\n\n if len(broadcast_args) > 1:\n raise CircuitError(\n \"QuantumCircuit.data modification does not support argument broadcasting.\"\n )\n\n qargs, cargs = broadcast_args[0]\n\n self._circuit._check_dups(qargs)\n return CircuitInstruction(operation, tuple(qargs), tuple(cargs))\n\n def insert(self, index, value):\n self._circuit._data.insert(index, None)\n try:\n self[index] = value\n except CircuitError:\n del self._circuit._data[index]\n raise\n\n def __iter__(self):\n return iter(self._circuit._data)\n\n def __delitem__(self, i):\n del self._circuit._data[i]\n\n def __len__(self):\n return len(self._circuit._data)\n\n def __cast(self, other):\n return other._circuit._data if isinstance(other, QuantumCircuitData) else other\n\n def __repr__(self):\n return repr(self._circuit._data)\n\n def __lt__(self, other):\n return self._circuit._data < self.__cast(other)\n\n def __le__(self, other):\n return self._circuit._data <= self.__cast(other)\n\n def __eq__(self, other):\n return self._circuit._data == self.__cast(other)\n\n def __gt__(self, other):\n return self._circuit._data > self.__cast(other)\n\n def __ge__(self, other):\n return self._circuit._data >= self.__cast(other)\n\n def __add__(self, other):\n return 
self._circuit._data + self.__cast(other)\n\n def __radd__(self, other):\n return self.__cast(other) + self._circuit._data\n\n def __mul__(self, n):\n return self._circuit._data * n\n\n def __rmul__(self, n):\n return n * self._circuit._data\n\n def sort(self, *args, **kwargs):\n \"\"\"In-place stable sort. Accepts arguments of list.sort.\"\"\"\n self._circuit._data.sort(*args, **kwargs)\n\n def copy(self):\n \"\"\"Returns a shallow copy of instruction list.\"\"\"\n return self._circuit._data.copy()\n", "path": "qiskit/circuit/quantumcircuitdata.py"}]} | 2,940 | 492 |
gh_patches_debug_16214 | rasdani/github-patches | git_diff | InstaPy__InstaPy-504 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pagination not working (I got a quick fix-- class was renamed)
Hey the "load more" button class was renamed yielding this error: 'Load button not found, working with current images!'
The issue was a class name change for the "load more"button. I fixed my bot by updating instapy/like_util.py both lines 35 and 115 (they were identical) from:
('//a[contains(@class, "_8imhp _glz1g")]')
to:
('//a[contains(@class, "_1cr2e _epyes")]')
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instapy/like_util.py`
Content:
```
1 import re
2
3 """Module that handles the like features"""
4 from math import ceil
5 from re import findall
6 from selenium.webdriver.common.keys import Keys
7
8 from .time_util import sleep
9
10
11 def get_links_for_location(browser, location, amount, media=None):
12 """Fetches the number of links specified
13 by amount and returns a list of links"""
14 if media is None:
15 # All known media types
16 media = ['', 'Post', 'Video']
17 elif media == 'Photo':
18 # Include posts with multiple images in it
19 media = ['', 'Post']
20 else:
21 # Make it an array to use it in the following part
22 media = [media]
23
24 browser.get('https://www.instagram.com/explore/locations/'
25 + location)
26 sleep(2)
27
28 # clicking load more
29 body_elem = browser.find_element_by_tag_name('body')
30 sleep(2)
31
32 abort = True
33 try:
34 load_button = body_elem.find_element_by_xpath \
35 ('//a[contains(@class, "_8imhp _glz1g")]')
36 except:
37 print('Load button not found, working with current images!')
38 else:
39 abort = False
40 body_elem.send_keys(Keys.END)
41 sleep(2)
42 load_button.click()
43
44 body_elem.send_keys(Keys.HOME)
45 sleep(1)
46
47 # Get links
48 main_elem = browser.find_element_by_tag_name('main')
49 link_elems = main_elem.find_elements_by_tag_name('a')
50 total_links = len(link_elems)
51 links = [link_elem.get_attribute('href') for link_elem in link_elems
52 if link_elem.text in media]
53 filtered_links = len(links)
54
55 while (filtered_links < amount) and not abort:
56 amount_left = amount - filtered_links
57 # Average items of the right media per page loaded
58 new_per_page = ceil(12 * filtered_links / total_links)
59 if new_per_page == 0:
60 # Avoid division by zero
61 new_per_page = 1. / 12.
62 # Number of page load needed
63 new_needed = int(ceil(amount_left / new_per_page))
64
65 if new_needed > 12:
66 # Don't go bananas trying to get all of instagram!
67 new_needed = 12
68
69 for i in range(new_needed): # add images x * 12
70 # Keep the latest window active while loading more posts
71 before_load = total_links
72 body_elem.send_keys(Keys.END)
73 sleep(1)
74 body_elem.send_keys(Keys.HOME)
75 sleep(1)
76 link_elems = main_elem.find_elements_by_tag_name('a')
77 total_links = len(link_elems)
78 abort = (before_load == total_links)
79 if abort:
80 break
81
82 links = [link_elem.get_attribute('href') for link_elem in link_elems
83 if link_elem.text in media]
84 filtered_links = len(links)
85
86 return links[:amount]
87
88
89 def get_links_for_tag(browser, tag, amount, media=None):
90 """Fetches the number of links specified
91 by amount and returns a list of links"""
92 if media is None:
93 # All known media types
94 media = ['', 'Post', 'Video']
95 elif media == 'Photo':
96 # Include posts with multiple images in it
97 media = ['', 'Post']
98 else:
99 # Make it an array to use it in the following part
100 media = [media]
101
102 browser.get('https://www.instagram.com/explore/tags/'
103 + (tag[1:] if tag[:1] == '#' else tag))
104 sleep(2)
105
106 # clicking load more
107 body_elem = browser.find_element_by_tag_name('body')
108 sleep(2)
109
110 abort = True
111 try:
112 load_button = body_elem.find_element_by_xpath \
113 ('//a[contains(@class, "_8imhp _glz1g")]')
114 except:
115 print('Load button not found, working with current images!')
116 else:
117 abort = False
118 body_elem.send_keys(Keys.END)
119 sleep(2)
120 load_button.click()
121
122 body_elem.send_keys(Keys.HOME)
123 sleep(1)
124
125 # Get links
126 main_elem = browser.find_element_by_tag_name('main')
127 link_elems = main_elem.find_elements_by_tag_name('a')
128 total_links = len(link_elems)
129 links = []
130 filtered_links = 0
131 try:
132 if link_elems:
133 links = [link_elem.get_attribute('href') for link_elem in link_elems
134 if link_elem and link_elem.text in media]
135 filtered_links = len(links)
136
137 except BaseException as e:
138 print("link_elems error \n", str(e))
139
140 while (filtered_links < amount) and not abort:
141 amount_left = amount - filtered_links
142 # Average items of the right media per page loaded
143 new_per_page = ceil(12 * filtered_links / total_links)
144 if new_per_page == 0:
145 # Avoid division by zero
146 new_per_page = 1. / 12.
147 # Number of page load needed
148 new_needed = int(ceil(amount_left / new_per_page))
149
150 if new_needed > 12:
151 # Don't go bananas trying to get all of instagram!
152 new_needed = 12
153
154 for i in range(new_needed): # add images x * 12
155 # Keep the latest window active while loading more posts
156 before_load = total_links
157 body_elem.send_keys(Keys.END)
158 sleep(1)
159 body_elem.send_keys(Keys.HOME)
160 sleep(1)
161 link_elems = main_elem.find_elements_by_tag_name('a')
162 total_links = len(link_elems)
163 abort = (before_load == total_links)
164 if abort:
165 break
166
167 links = [link_elem.get_attribute('href') for link_elem in link_elems
168 if link_elem.text in media]
169 filtered_links = len(links)
170
171 return links[:amount]
172
173
174 def check_link(browser, link, dont_like, ignore_if_contains, ignore_users,
175 username, like_by_followers_upper_limit, like_by_followers_lower_limit):
176 browser.get(link)
177 sleep(2)
178
179 """Check if the Post is Valid/Exists"""
180 post_page = browser.execute_script("return window._sharedData.entry_data.PostPage")
181 if post_page is None:
182 print('Unavailable Page: {}'.format(link.encode('utf-8')))
183 return True, None, None, 'Unavailable Page'
184
185 """Gets the description of the link and checks for the dont_like tags"""
186 graphql = 'graphql' in post_page[0]
187 if graphql:
188 media = post_page[0]['graphql']['shortcode_media']
189 is_video = media['is_video']
190 user_name = media['owner']['username']
191 image_text = media['edge_media_to_caption']['edges']
192 image_text = image_text[0]['node']['text'] if image_text else None
193 owner_comments = browser.execute_script('''
194 latest_comments = window._sharedData.entry_data.PostPage[0].graphql.shortcode_media.edge_media_to_comment.edges;
195 if (latest_comments === undefined) latest_comments = Array();
196 owner_comments = latest_comments
197 .filter(item => item.node.owner.username == '{}')
198 .map(item => item.node.text)
199 .reduce((item, total) => item + '\\n' + total, '');
200 return owner_comments;
201 '''.format(user_name))
202 else:
203 media = post_page[0]['media']
204 is_video = media['is_video']
205 user_name = media['owner']['username']
206 image_text = media['caption']
207 owner_comments = browser.execute_script('''
208 latest_comments = window._sharedData.entry_data.PostPage[0].media.comments.nodes;
209 if (latest_comments === undefined) latest_comments = Array();
210 owner_comments = latest_comments
211 .filter(item => item.user.username == '{}')
212 .map(item => item.text)
213 .reduce((item, total) => item + '\\n' + total, '');
214 return owner_comments;
215 '''.format(user_name))
216
217 if owner_comments == '':
218 owner_comments = None
219
220 """Append owner comments to description as it might contain further tags"""
221 if image_text is None:
222 image_text = owner_comments
223 elif owner_comments:
224 image_text = image_text + '\n' + owner_comments
225
226 """If the image still has no description gets the first comment"""
227 if image_text is None:
228 if graphql:
229 image_text = media['edge_media_to_comment']['edges']
230 image_text = image_text[0]['node']['text'] if image_text else None
231 else:
232 image_text = media['comments']['nodes']
233 image_text = image_text[0]['text'] if image_text else None
234 if image_text is None:
235 image_text = "No description"
236
237 print('Image from: {}'.format(user_name.encode('utf-8')))
238
239 """Find the number of followes the user has"""
240 if like_by_followers_upper_limit or like_by_followers_lower_limit:
241 userlink = 'https://www.instagram.com/' + user_name
242 browser.get(userlink)
243 sleep(1)
244 num_followers = browser.execute_script(
245 "return window._sharedData.entry_data.ProfilePage[0].user.followed_by.count")
246 browser.get(link)
247 sleep(1)
248 print('Number of Followers: {}'.format(num_followers))
249
250 if like_by_followers_upper_limit and num_followers > like_by_followers_upper_limit:
251 return True, user_name, is_video, 'Number of followers exceeds limit'
252 if like_by_followers_lower_limit and num_followers < like_by_followers_lower_limit:
253 return True, user_name, is_video, 'Number of followers does not reach minimum'
254
255 print('Link: {}'.format(link.encode('utf-8')))
256 print('Description: {}'.format(image_text.encode('utf-8')))
257
258 """Check if the user_name is in the ignore_users list"""
259 if (user_name in ignore_users) or (user_name == username):
260 return True, user_name, is_video, 'Username'
261
262 if any((word in image_text for word in ignore_if_contains)):
263 return False, user_name, is_video, 'None'
264
265 dont_like_regex = []
266
267 for dont_likes in dont_like:
268 if dont_likes.startswith("#"):
269 dont_like_regex.append(dont_likes + "([^\d\w]|$)")
270 elif dont_likes.startswith("["):
271 dont_like_regex.append("#" + dont_likes[1:] + "[\d\w]+([^\d\w]|$)")
272 elif dont_likes.startswith("]"):
273 dont_like_regex.append("#[\d\w]+" + dont_likes[1:] + "([^\d\w]|$)")
274 else:
275 dont_like_regex.append("#[\d\w]*" + dont_likes + "[\d\w]*([^\d\w]|$)")
276
277 for dont_likes_regex in dont_like_regex:
278 if re.search(dont_likes_regex, image_text, re.IGNORECASE):
279 return True, user_name, is_video, 'Inappropriate'
280
281 return False, user_name, is_video, 'None'
282
283
284 def like_image(browser):
285 """Likes the browser opened image"""
286 like_elem = browser.find_elements_by_xpath("//a[@role = 'button']/span[text()='Like']")
287 liked_elem = browser.find_elements_by_xpath("//a[@role = 'button']/span[text()='Unlike']")
288
289 if len(like_elem) == 1:
290 browser.execute_script(
291 "document.getElementsByClassName('" + like_elem[0].get_attribute("class") + "')[0].click()")
292 print('--> Image Liked!')
293 sleep(2)
294 return True
295 elif len(liked_elem) == 1:
296 print('--> Already Liked!')
297 return False
298 else:
299 print('--> Invalid Like Element!')
300 return False
301
302
303 def get_tags(browser, url):
304 """Gets all the tags of the given description in the url"""
305 browser.get(url)
306 sleep(1)
307
308 graphql = browser.execute_script("return ('graphql' in window._sharedData.entry_data.PostPage[0])")
309 if graphql:
310 image_text = browser.execute_script(
311 "return window._sharedData.entry_data.PostPage[0].graphql.shortcode_media.edge_media_to_caption.edges[0].node.text")
312 else:
313 image_text = browser.execute_script("return window._sharedData.entry_data.PostPage[0].media.caption.text")
314
315 tags = findall(r'#\w*', image_text)
316 return tags
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instapy/like_util.py b/instapy/like_util.py
--- a/instapy/like_util.py
+++ b/instapy/like_util.py
@@ -32,7 +32,7 @@
abort = True
try:
load_button = body_elem.find_element_by_xpath \
- ('//a[contains(@class, "_8imhp _glz1g")]')
+ ('//a[contains(@class, "_1cr2e _epyes")]')
except:
print('Load button not found, working with current images!')
else:
@@ -110,7 +110,7 @@
abort = True
try:
load_button = body_elem.find_element_by_xpath \
- ('//a[contains(@class, "_8imhp _glz1g")]')
+ ('//a[contains(@class, "_1cr2e _epyes")]')
except:
print('Load button not found, working with current images!')
else:
| {"golden_diff": "diff --git a/instapy/like_util.py b/instapy/like_util.py\n--- a/instapy/like_util.py\n+++ b/instapy/like_util.py\n@@ -32,7 +32,7 @@\n abort = True\n try:\n load_button = body_elem.find_element_by_xpath \\\n- ('//a[contains(@class, \"_8imhp _glz1g\")]')\n+ ('//a[contains(@class, \"_1cr2e _epyes\")]')\n except:\n print('Load button not found, working with current images!')\n else:\n@@ -110,7 +110,7 @@\n abort = True\n try:\n load_button = body_elem.find_element_by_xpath \\\n- ('//a[contains(@class, \"_8imhp _glz1g\")]')\n+ ('//a[contains(@class, \"_1cr2e _epyes\")]')\n except:\n print('Load button not found, working with current images!')\n else:\n", "issue": "Pagination not working (I got a quick fix-- class was renamed)\nHey the \"load more\" button class was renamed yielding this error: 'Load button not found, working with current images!'\r\n\r\nThe issue was a class name change for the \"load more\"button. I fixed my bot by updating instapy/like_util.py both lines 35 and 115 (they were identical) from:\r\n('//a[contains(@class, \"_8imhp _glz1g\")]')\r\n\r\nto:\r\n('//a[contains(@class, \"_1cr2e _epyes\")]')\nPagination not working (I got a quick fix-- class was renamed)\nHey the \"load more\" button class was renamed yielding this error: 'Load button not found, working with current images!'\r\n\r\nThe issue was a class name change for the \"load more\"button. I fixed my bot by updating instapy/like_util.py both lines 35 and 115 (they were identical) from:\r\n('//a[contains(@class, \"_8imhp _glz1g\")]')\r\n\r\nto:\r\n('//a[contains(@class, \"_1cr2e _epyes\")]')\n", "before_files": [{"content": "import re\n\n\"\"\"Module that handles the like features\"\"\"\nfrom math import ceil\nfrom re import findall\nfrom selenium.webdriver.common.keys import Keys\n\nfrom .time_util import sleep\n\n\ndef get_links_for_location(browser, location, amount, media=None):\n \"\"\"Fetches the number of links specified\n by amount and returns a list of links\"\"\"\n if media is None:\n # All known media types\n media = ['', 'Post', 'Video']\n elif media == 'Photo':\n # Include posts with multiple images in it\n media = ['', 'Post']\n else:\n # Make it an array to use it in the following part\n media = [media]\n\n browser.get('https://www.instagram.com/explore/locations/'\n + location)\n sleep(2)\n\n # clicking load more\n body_elem = browser.find_element_by_tag_name('body')\n sleep(2)\n\n abort = True\n try:\n load_button = body_elem.find_element_by_xpath \\\n ('//a[contains(@class, \"_8imhp _glz1g\")]')\n except:\n print('Load button not found, working with current images!')\n else:\n abort = False\n body_elem.send_keys(Keys.END)\n sleep(2)\n load_button.click()\n\n body_elem.send_keys(Keys.HOME)\n sleep(1)\n\n # Get links\n main_elem = browser.find_element_by_tag_name('main')\n link_elems = main_elem.find_elements_by_tag_name('a')\n total_links = len(link_elems)\n links = [link_elem.get_attribute('href') for link_elem in link_elems\n if link_elem.text in media]\n filtered_links = len(links)\n\n while (filtered_links < amount) and not abort:\n amount_left = amount - filtered_links\n # Average items of the right media per page loaded\n new_per_page = ceil(12 * filtered_links / total_links)\n if new_per_page == 0:\n # Avoid division by zero\n new_per_page = 1. 
/ 12.\n # Number of page load needed\n new_needed = int(ceil(amount_left / new_per_page))\n\n if new_needed > 12:\n # Don't go bananas trying to get all of instagram!\n new_needed = 12\n\n for i in range(new_needed): # add images x * 12\n # Keep the latest window active while loading more posts\n before_load = total_links\n body_elem.send_keys(Keys.END)\n sleep(1)\n body_elem.send_keys(Keys.HOME)\n sleep(1)\n link_elems = main_elem.find_elements_by_tag_name('a')\n total_links = len(link_elems)\n abort = (before_load == total_links)\n if abort:\n break\n\n links = [link_elem.get_attribute('href') for link_elem in link_elems\n if link_elem.text in media]\n filtered_links = len(links)\n\n return links[:amount]\n\n\ndef get_links_for_tag(browser, tag, amount, media=None):\n \"\"\"Fetches the number of links specified\n by amount and returns a list of links\"\"\"\n if media is None:\n # All known media types\n media = ['', 'Post', 'Video']\n elif media == 'Photo':\n # Include posts with multiple images in it\n media = ['', 'Post']\n else:\n # Make it an array to use it in the following part\n media = [media]\n\n browser.get('https://www.instagram.com/explore/tags/'\n + (tag[1:] if tag[:1] == '#' else tag))\n sleep(2)\n\n # clicking load more\n body_elem = browser.find_element_by_tag_name('body')\n sleep(2)\n\n abort = True\n try:\n load_button = body_elem.find_element_by_xpath \\\n ('//a[contains(@class, \"_8imhp _glz1g\")]')\n except:\n print('Load button not found, working with current images!')\n else:\n abort = False\n body_elem.send_keys(Keys.END)\n sleep(2)\n load_button.click()\n\n body_elem.send_keys(Keys.HOME)\n sleep(1)\n\n # Get links\n main_elem = browser.find_element_by_tag_name('main')\n link_elems = main_elem.find_elements_by_tag_name('a')\n total_links = len(link_elems)\n links = []\n filtered_links = 0\n try:\n if link_elems:\n links = [link_elem.get_attribute('href') for link_elem in link_elems\n if link_elem and link_elem.text in media]\n filtered_links = len(links)\n\n except BaseException as e:\n print(\"link_elems error \\n\", str(e))\n\n while (filtered_links < amount) and not abort:\n amount_left = amount - filtered_links\n # Average items of the right media per page loaded\n new_per_page = ceil(12 * filtered_links / total_links)\n if new_per_page == 0:\n # Avoid division by zero\n new_per_page = 1. 
/ 12.\n # Number of page load needed\n new_needed = int(ceil(amount_left / new_per_page))\n\n if new_needed > 12:\n # Don't go bananas trying to get all of instagram!\n new_needed = 12\n\n for i in range(new_needed): # add images x * 12\n # Keep the latest window active while loading more posts\n before_load = total_links\n body_elem.send_keys(Keys.END)\n sleep(1)\n body_elem.send_keys(Keys.HOME)\n sleep(1)\n link_elems = main_elem.find_elements_by_tag_name('a')\n total_links = len(link_elems)\n abort = (before_load == total_links)\n if abort:\n break\n\n links = [link_elem.get_attribute('href') for link_elem in link_elems\n if link_elem.text in media]\n filtered_links = len(links)\n\n return links[:amount]\n\n\ndef check_link(browser, link, dont_like, ignore_if_contains, ignore_users,\n username, like_by_followers_upper_limit, like_by_followers_lower_limit):\n browser.get(link)\n sleep(2)\n\n \"\"\"Check if the Post is Valid/Exists\"\"\"\n post_page = browser.execute_script(\"return window._sharedData.entry_data.PostPage\")\n if post_page is None:\n print('Unavailable Page: {}'.format(link.encode('utf-8')))\n return True, None, None, 'Unavailable Page'\n\n \"\"\"Gets the description of the link and checks for the dont_like tags\"\"\"\n graphql = 'graphql' in post_page[0]\n if graphql:\n media = post_page[0]['graphql']['shortcode_media']\n is_video = media['is_video']\n user_name = media['owner']['username']\n image_text = media['edge_media_to_caption']['edges']\n image_text = image_text[0]['node']['text'] if image_text else None\n owner_comments = browser.execute_script('''\n latest_comments = window._sharedData.entry_data.PostPage[0].graphql.shortcode_media.edge_media_to_comment.edges;\n if (latest_comments === undefined) latest_comments = Array();\n owner_comments = latest_comments\n .filter(item => item.node.owner.username == '{}')\n .map(item => item.node.text)\n .reduce((item, total) => item + '\\\\n' + total, '');\n return owner_comments;\n '''.format(user_name))\n else:\n media = post_page[0]['media']\n is_video = media['is_video']\n user_name = media['owner']['username']\n image_text = media['caption']\n owner_comments = browser.execute_script('''\n latest_comments = window._sharedData.entry_data.PostPage[0].media.comments.nodes;\n if (latest_comments === undefined) latest_comments = Array();\n owner_comments = latest_comments\n .filter(item => item.user.username == '{}')\n .map(item => item.text)\n .reduce((item, total) => item + '\\\\n' + total, '');\n return owner_comments;\n '''.format(user_name))\n\n if owner_comments == '':\n owner_comments = None\n\n \"\"\"Append owner comments to description as it might contain further tags\"\"\"\n if image_text is None:\n image_text = owner_comments\n elif owner_comments:\n image_text = image_text + '\\n' + owner_comments\n\n \"\"\"If the image still has no description gets the first comment\"\"\"\n if image_text is None:\n if graphql:\n image_text = media['edge_media_to_comment']['edges']\n image_text = image_text[0]['node']['text'] if image_text else None\n else:\n image_text = media['comments']['nodes']\n image_text = image_text[0]['text'] if image_text else None\n if image_text is None:\n image_text = \"No description\"\n\n print('Image from: {}'.format(user_name.encode('utf-8')))\n\n \"\"\"Find the number of followes the user has\"\"\"\n if like_by_followers_upper_limit or like_by_followers_lower_limit:\n userlink = 'https://www.instagram.com/' + user_name\n browser.get(userlink)\n sleep(1)\n num_followers = browser.execute_script(\n 
\"return window._sharedData.entry_data.ProfilePage[0].user.followed_by.count\")\n browser.get(link)\n sleep(1)\n print('Number of Followers: {}'.format(num_followers))\n\n if like_by_followers_upper_limit and num_followers > like_by_followers_upper_limit:\n return True, user_name, is_video, 'Number of followers exceeds limit'\n if like_by_followers_lower_limit and num_followers < like_by_followers_lower_limit:\n return True, user_name, is_video, 'Number of followers does not reach minimum'\n\n print('Link: {}'.format(link.encode('utf-8')))\n print('Description: {}'.format(image_text.encode('utf-8')))\n\n \"\"\"Check if the user_name is in the ignore_users list\"\"\"\n if (user_name in ignore_users) or (user_name == username):\n return True, user_name, is_video, 'Username'\n\n if any((word in image_text for word in ignore_if_contains)):\n return False, user_name, is_video, 'None'\n\n dont_like_regex = []\n\n for dont_likes in dont_like:\n if dont_likes.startswith(\"#\"):\n dont_like_regex.append(dont_likes + \"([^\\d\\w]|$)\")\n elif dont_likes.startswith(\"[\"):\n dont_like_regex.append(\"#\" + dont_likes[1:] + \"[\\d\\w]+([^\\d\\w]|$)\")\n elif dont_likes.startswith(\"]\"):\n dont_like_regex.append(\"#[\\d\\w]+\" + dont_likes[1:] + \"([^\\d\\w]|$)\")\n else:\n dont_like_regex.append(\"#[\\d\\w]*\" + dont_likes + \"[\\d\\w]*([^\\d\\w]|$)\")\n\n for dont_likes_regex in dont_like_regex:\n if re.search(dont_likes_regex, image_text, re.IGNORECASE):\n return True, user_name, is_video, 'Inappropriate'\n\n return False, user_name, is_video, 'None'\n\n\ndef like_image(browser):\n \"\"\"Likes the browser opened image\"\"\"\n like_elem = browser.find_elements_by_xpath(\"//a[@role = 'button']/span[text()='Like']\")\n liked_elem = browser.find_elements_by_xpath(\"//a[@role = 'button']/span[text()='Unlike']\")\n\n if len(like_elem) == 1:\n browser.execute_script(\n \"document.getElementsByClassName('\" + like_elem[0].get_attribute(\"class\") + \"')[0].click()\")\n print('--> Image Liked!')\n sleep(2)\n return True\n elif len(liked_elem) == 1:\n print('--> Already Liked!')\n return False\n else:\n print('--> Invalid Like Element!')\n return False\n\n\ndef get_tags(browser, url):\n \"\"\"Gets all the tags of the given description in the url\"\"\"\n browser.get(url)\n sleep(1)\n\n graphql = browser.execute_script(\"return ('graphql' in window._sharedData.entry_data.PostPage[0])\")\n if graphql:\n image_text = browser.execute_script(\n \"return window._sharedData.entry_data.PostPage[0].graphql.shortcode_media.edge_media_to_caption.edges[0].node.text\")\n else:\n image_text = browser.execute_script(\"return window._sharedData.entry_data.PostPage[0].media.caption.text\")\n\n tags = findall(r'#\\w*', image_text)\n return tags\n", "path": "instapy/like_util.py"}], "after_files": [{"content": "import re\n\n\"\"\"Module that handles the like features\"\"\"\nfrom math import ceil\nfrom re import findall\nfrom selenium.webdriver.common.keys import Keys\n\nfrom .time_util import sleep\n\n\ndef get_links_for_location(browser, location, amount, media=None):\n \"\"\"Fetches the number of links specified\n by amount and returns a list of links\"\"\"\n if media is None:\n # All known media types\n media = ['', 'Post', 'Video']\n elif media == 'Photo':\n # Include posts with multiple images in it\n media = ['', 'Post']\n else:\n # Make it an array to use it in the following part\n media = [media]\n\n browser.get('https://www.instagram.com/explore/locations/'\n + location)\n sleep(2)\n\n # clicking load more\n body_elem = 
browser.find_element_by_tag_name('body')\n sleep(2)\n\n abort = True\n try:\n load_button = body_elem.find_element_by_xpath \\\n ('//a[contains(@class, \"_1cr2e _epyes\")]')\n except:\n print('Load button not found, working with current images!')\n else:\n abort = False\n body_elem.send_keys(Keys.END)\n sleep(2)\n load_button.click()\n\n body_elem.send_keys(Keys.HOME)\n sleep(1)\n\n # Get links\n main_elem = browser.find_element_by_tag_name('main')\n link_elems = main_elem.find_elements_by_tag_name('a')\n total_links = len(link_elems)\n links = [link_elem.get_attribute('href') for link_elem in link_elems\n if link_elem.text in media]\n filtered_links = len(links)\n\n while (filtered_links < amount) and not abort:\n amount_left = amount - filtered_links\n # Average items of the right media per page loaded\n new_per_page = ceil(12 * filtered_links / total_links)\n if new_per_page == 0:\n # Avoid division by zero\n new_per_page = 1. / 12.\n # Number of page load needed\n new_needed = int(ceil(amount_left / new_per_page))\n\n if new_needed > 12:\n # Don't go bananas trying to get all of instagram!\n new_needed = 12\n\n for i in range(new_needed): # add images x * 12\n # Keep the latest window active while loading more posts\n before_load = total_links\n body_elem.send_keys(Keys.END)\n sleep(1)\n body_elem.send_keys(Keys.HOME)\n sleep(1)\n link_elems = main_elem.find_elements_by_tag_name('a')\n total_links = len(link_elems)\n abort = (before_load == total_links)\n if abort:\n break\n\n links = [link_elem.get_attribute('href') for link_elem in link_elems\n if link_elem.text in media]\n filtered_links = len(links)\n\n return links[:amount]\n\n\ndef get_links_for_tag(browser, tag, amount, media=None):\n \"\"\"Fetches the number of links specified\n by amount and returns a list of links\"\"\"\n if media is None:\n # All known media types\n media = ['', 'Post', 'Video']\n elif media == 'Photo':\n # Include posts with multiple images in it\n media = ['', 'Post']\n else:\n # Make it an array to use it in the following part\n media = [media]\n\n browser.get('https://www.instagram.com/explore/tags/'\n + (tag[1:] if tag[:1] == '#' else tag))\n sleep(2)\n\n # clicking load more\n body_elem = browser.find_element_by_tag_name('body')\n sleep(2)\n\n abort = True\n try:\n load_button = body_elem.find_element_by_xpath \\\n ('//a[contains(@class, \"_1cr2e _epyes\")]')\n except:\n print('Load button not found, working with current images!')\n else:\n abort = False\n body_elem.send_keys(Keys.END)\n sleep(2)\n load_button.click()\n\n body_elem.send_keys(Keys.HOME)\n sleep(1)\n\n # Get links\n main_elem = browser.find_element_by_tag_name('main')\n link_elems = main_elem.find_elements_by_tag_name('a')\n total_links = len(link_elems)\n links = []\n filtered_links = 0\n try:\n if link_elems:\n links = [link_elem.get_attribute('href') for link_elem in link_elems\n if link_elem and link_elem.text in media]\n filtered_links = len(links)\n\n except BaseException as e:\n print(\"link_elems error \\n\", str(e))\n\n while (filtered_links < amount) and not abort:\n amount_left = amount - filtered_links\n # Average items of the right media per page loaded\n new_per_page = ceil(12 * filtered_links / total_links)\n if new_per_page == 0:\n # Avoid division by zero\n new_per_page = 1. 
/ 12.\n # Number of page load needed\n new_needed = int(ceil(amount_left / new_per_page))\n\n if new_needed > 12:\n # Don't go bananas trying to get all of instagram!\n new_needed = 12\n\n for i in range(new_needed): # add images x * 12\n # Keep the latest window active while loading more posts\n before_load = total_links\n body_elem.send_keys(Keys.END)\n sleep(1)\n body_elem.send_keys(Keys.HOME)\n sleep(1)\n link_elems = main_elem.find_elements_by_tag_name('a')\n total_links = len(link_elems)\n abort = (before_load == total_links)\n if abort:\n break\n\n links = [link_elem.get_attribute('href') for link_elem in link_elems\n if link_elem.text in media]\n filtered_links = len(links)\n\n return links[:amount]\n\n\ndef check_link(browser, link, dont_like, ignore_if_contains, ignore_users,\n username, like_by_followers_upper_limit, like_by_followers_lower_limit):\n browser.get(link)\n sleep(2)\n\n \"\"\"Check if the Post is Valid/Exists\"\"\"\n post_page = browser.execute_script(\"return window._sharedData.entry_data.PostPage\")\n if post_page is None:\n print('Unavailable Page: {}'.format(link.encode('utf-8')))\n return True, None, None, 'Unavailable Page'\n\n \"\"\"Gets the description of the link and checks for the dont_like tags\"\"\"\n graphql = 'graphql' in post_page[0]\n if graphql:\n media = post_page[0]['graphql']['shortcode_media']\n is_video = media['is_video']\n user_name = media['owner']['username']\n image_text = media['edge_media_to_caption']['edges']\n image_text = image_text[0]['node']['text'] if image_text else None\n owner_comments = browser.execute_script('''\n latest_comments = window._sharedData.entry_data.PostPage[0].graphql.shortcode_media.edge_media_to_comment.edges;\n if (latest_comments === undefined) latest_comments = Array();\n owner_comments = latest_comments\n .filter(item => item.node.owner.username == '{}')\n .map(item => item.node.text)\n .reduce((item, total) => item + '\\\\n' + total, '');\n return owner_comments;\n '''.format(user_name))\n else:\n media = post_page[0]['media']\n is_video = media['is_video']\n user_name = media['owner']['username']\n image_text = media['caption']\n owner_comments = browser.execute_script('''\n latest_comments = window._sharedData.entry_data.PostPage[0].media.comments.nodes;\n if (latest_comments === undefined) latest_comments = Array();\n owner_comments = latest_comments\n .filter(item => item.user.username == '{}')\n .map(item => item.text)\n .reduce((item, total) => item + '\\\\n' + total, '');\n return owner_comments;\n '''.format(user_name))\n\n if owner_comments == '':\n owner_comments = None\n\n \"\"\"Append owner comments to description as it might contain further tags\"\"\"\n if image_text is None:\n image_text = owner_comments\n elif owner_comments:\n image_text = image_text + '\\n' + owner_comments\n\n \"\"\"If the image still has no description gets the first comment\"\"\"\n if image_text is None:\n if graphql:\n image_text = media['edge_media_to_comment']['edges']\n image_text = image_text[0]['node']['text'] if image_text else None\n else:\n image_text = media['comments']['nodes']\n image_text = image_text[0]['text'] if image_text else None\n if image_text is None:\n image_text = \"No description\"\n\n print('Image from: {}'.format(user_name.encode('utf-8')))\n\n \"\"\"Find the number of followes the user has\"\"\"\n if like_by_followers_upper_limit or like_by_followers_lower_limit:\n userlink = 'https://www.instagram.com/' + user_name\n browser.get(userlink)\n sleep(1)\n num_followers = browser.execute_script(\n 
\"return window._sharedData.entry_data.ProfilePage[0].user.followed_by.count\")\n browser.get(link)\n sleep(1)\n print('Number of Followers: {}'.format(num_followers))\n\n if like_by_followers_upper_limit and num_followers > like_by_followers_upper_limit:\n return True, user_name, is_video, 'Number of followers exceeds limit'\n if like_by_followers_lower_limit and num_followers < like_by_followers_lower_limit:\n return True, user_name, is_video, 'Number of followers does not reach minimum'\n\n print('Link: {}'.format(link.encode('utf-8')))\n print('Description: {}'.format(image_text.encode('utf-8')))\n\n \"\"\"Check if the user_name is in the ignore_users list\"\"\"\n if (user_name in ignore_users) or (user_name == username):\n return True, user_name, is_video, 'Username'\n\n if any((word in image_text for word in ignore_if_contains)):\n return False, user_name, is_video, 'None'\n\n dont_like_regex = []\n\n for dont_likes in dont_like:\n if dont_likes.startswith(\"#\"):\n dont_like_regex.append(dont_likes + \"([^\\d\\w]|$)\")\n elif dont_likes.startswith(\"[\"):\n dont_like_regex.append(\"#\" + dont_likes[1:] + \"[\\d\\w]+([^\\d\\w]|$)\")\n elif dont_likes.startswith(\"]\"):\n dont_like_regex.append(\"#[\\d\\w]+\" + dont_likes[1:] + \"([^\\d\\w]|$)\")\n else:\n dont_like_regex.append(\"#[\\d\\w]*\" + dont_likes + \"[\\d\\w]*([^\\d\\w]|$)\")\n\n for dont_likes_regex in dont_like_regex:\n if re.search(dont_likes_regex, image_text, re.IGNORECASE):\n return True, user_name, is_video, 'Inappropriate'\n\n return False, user_name, is_video, 'None'\n\n\ndef like_image(browser):\n \"\"\"Likes the browser opened image\"\"\"\n like_elem = browser.find_elements_by_xpath(\"//a[@role = 'button']/span[text()='Like']\")\n liked_elem = browser.find_elements_by_xpath(\"//a[@role = 'button']/span[text()='Unlike']\")\n\n if len(like_elem) == 1:\n browser.execute_script(\n \"document.getElementsByClassName('\" + like_elem[0].get_attribute(\"class\") + \"')[0].click()\")\n print('--> Image Liked!')\n sleep(2)\n return True\n elif len(liked_elem) == 1:\n print('--> Already Liked!')\n return False\n else:\n print('--> Invalid Like Element!')\n return False\n\n\ndef get_tags(browser, url):\n \"\"\"Gets all the tags of the given description in the url\"\"\"\n browser.get(url)\n sleep(1)\n\n graphql = browser.execute_script(\"return ('graphql' in window._sharedData.entry_data.PostPage[0])\")\n if graphql:\n image_text = browser.execute_script(\n \"return window._sharedData.entry_data.PostPage[0].graphql.shortcode_media.edge_media_to_caption.edges[0].node.text\")\n else:\n image_text = browser.execute_script(\"return window._sharedData.entry_data.PostPage[0].media.caption.text\")\n\n tags = findall(r'#\\w*', image_text)\n return tags\n", "path": "instapy/like_util.py"}]} | 4,052 | 220 |
gh_patches_debug_7860 | rasdani/github-patches | git_diff | encode__httpx-321 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Away-from-origin redirects should add a new host header
Prompted by #310
Away-from-origin redirects currently remove the `Host` header, without adding a new one.
I think we ought to be using `headers['Host'] = url.authority.encode("ascii")` instead of simply deleting the header.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/middleware.py`
Content:
```
1 import functools
2 import typing
3 from base64 import b64encode
4
5 from .config import DEFAULT_MAX_REDIRECTS
6 from .exceptions import RedirectBodyUnavailable, RedirectLoop, TooManyRedirects
7 from .models import URL, AsyncRequest, AsyncResponse, Cookies, Headers
8 from .status_codes import codes
9
10
11 class BaseMiddleware:
12 async def __call__(
13 self, request: AsyncRequest, get_response: typing.Callable
14 ) -> AsyncResponse:
15 raise NotImplementedError # pragma: no cover
16
17
18 class BasicAuthMiddleware(BaseMiddleware):
19 def __init__(
20 self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
21 ):
22 if isinstance(username, str):
23 username = username.encode("latin1")
24
25 if isinstance(password, str):
26 password = password.encode("latin1")
27
28 userpass = b":".join((username, password))
29 token = b64encode(userpass).decode().strip()
30
31 self.authorization_header = f"Basic {token}"
32
33 async def __call__(
34 self, request: AsyncRequest, get_response: typing.Callable
35 ) -> AsyncResponse:
36 request.headers["Authorization"] = self.authorization_header
37 return await get_response(request)
38
39
40 class CustomAuthMiddleware(BaseMiddleware):
41 def __init__(self, auth: typing.Callable[[AsyncRequest], AsyncRequest]):
42 self.auth = auth
43
44 async def __call__(
45 self, request: AsyncRequest, get_response: typing.Callable
46 ) -> AsyncResponse:
47 request = self.auth(request)
48 return await get_response(request)
49
50
51 class RedirectMiddleware(BaseMiddleware):
52 def __init__(
53 self,
54 allow_redirects: bool = True,
55 max_redirects: int = DEFAULT_MAX_REDIRECTS,
56 cookies: typing.Optional[Cookies] = None,
57 ):
58 self.allow_redirects = allow_redirects
59 self.max_redirects = max_redirects
60 self.cookies = cookies
61 self.history: typing.List[AsyncResponse] = []
62
63 async def __call__(
64 self, request: AsyncRequest, get_response: typing.Callable
65 ) -> AsyncResponse:
66 if len(self.history) > self.max_redirects:
67 raise TooManyRedirects()
68 if request.url in (response.url for response in self.history):
69 raise RedirectLoop()
70
71 response = await get_response(request)
72 response.history = list(self.history)
73
74 if not response.is_redirect:
75 return response
76
77 self.history.append(response)
78 next_request = self.build_redirect_request(request, response)
79
80 if self.allow_redirects:
81 return await self(next_request, get_response)
82
83 response.call_next = functools.partial(self, next_request, get_response)
84 return response
85
86 def build_redirect_request(
87 self, request: AsyncRequest, response: AsyncResponse
88 ) -> AsyncRequest:
89 method = self.redirect_method(request, response)
90 url = self.redirect_url(request, response)
91 headers = self.redirect_headers(request, url, method) # TODO: merge headers?
92 content = self.redirect_content(request, method)
93 cookies = Cookies(self.cookies)
94 cookies.update(request.cookies)
95 return AsyncRequest(
96 method=method, url=url, headers=headers, data=content, cookies=cookies
97 )
98
99 def redirect_method(self, request: AsyncRequest, response: AsyncResponse) -> str:
100 """
101 When being redirected we may want to change the method of the request
102 based on certain specs or browser behavior.
103 """
104 method = request.method
105
106 # https://tools.ietf.org/html/rfc7231#section-6.4.4
107 if response.status_code == codes.SEE_OTHER and method != "HEAD":
108 method = "GET"
109
110 # Do what the browsers do, despite standards...
111 # Turn 302s into GETs.
112 if response.status_code == codes.FOUND and method != "HEAD":
113 method = "GET"
114
115 # If a POST is responded to with a 301, turn it into a GET.
116 # This bizarre behaviour is explained in 'requests' issue 1704.
117 if response.status_code == codes.MOVED_PERMANENTLY and method == "POST":
118 method = "GET"
119
120 return method
121
122 def redirect_url(self, request: AsyncRequest, response: AsyncResponse) -> URL:
123 """
124 Return the URL for the redirect to follow.
125 """
126 location = response.headers["Location"]
127
128 url = URL(location, allow_relative=True)
129
130 # Facilitate relative 'Location' headers, as allowed by RFC 7231.
131 # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
132 if url.is_relative_url:
133 url = request.url.join(url)
134
135 # Attach previous fragment if needed (RFC 7231 7.1.2)
136 if request.url.fragment and not url.fragment:
137 url = url.copy_with(fragment=request.url.fragment)
138
139 return url
140
141 def redirect_headers(self, request: AsyncRequest, url: URL, method: str) -> Headers:
142 """
143 Return the headers that should be used for the redirect request.
144 """
145 headers = Headers(request.headers)
146
147 if url.origin != request.url.origin:
148 # Strip Authorization headers when responses are redirected away from
149 # the origin.
150 del headers["Authorization"]
151 del headers["Host"]
152
153 if method != request.method and method == "GET":
154 # If we've switch to a 'GET' request, then strip any headers which
155 # are only relevant to the request body.
156 del headers["Content-Length"]
157 del headers["Transfer-Encoding"]
158
159 return headers
160
161 def redirect_content(self, request: AsyncRequest, method: str) -> bytes:
162 """
163 Return the body that should be used for the redirect request.
164 """
165 if method != request.method and method == "GET":
166 return b""
167 if request.is_streaming:
168 raise RedirectBodyUnavailable()
169 return request.content
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/middleware.py b/httpx/middleware.py
--- a/httpx/middleware.py
+++ b/httpx/middleware.py
@@ -148,7 +148,7 @@
# Strip Authorization headers when responses are redirected away from
# the origin.
del headers["Authorization"]
- del headers["Host"]
+ headers["Host"] = url.authority
if method != request.method and method == "GET":
# If we've switch to a 'GET' request, then strip any headers which
| {"golden_diff": "diff --git a/httpx/middleware.py b/httpx/middleware.py\n--- a/httpx/middleware.py\n+++ b/httpx/middleware.py\n@@ -148,7 +148,7 @@\n # Strip Authorization headers when responses are redirected away from\n # the origin.\n del headers[\"Authorization\"]\n- del headers[\"Host\"]\n+ headers[\"Host\"] = url.authority\n \n if method != request.method and method == \"GET\":\n # If we've switch to a 'GET' request, then strip any headers which\n", "issue": "Away-from-origin redirects should add a new host header\nPrompted by #310\r\n\r\nAway-from-origin redirects currently remove the `Host` header, without adding a new one.\r\nI think we ought to be using `headers['Host'] = url.authority.encode(\"ascii\")` instead of simply deleting the header.\n", "before_files": [{"content": "import functools\nimport typing\nfrom base64 import b64encode\n\nfrom .config import DEFAULT_MAX_REDIRECTS\nfrom .exceptions import RedirectBodyUnavailable, RedirectLoop, TooManyRedirects\nfrom .models import URL, AsyncRequest, AsyncResponse, Cookies, Headers\nfrom .status_codes import codes\n\n\nclass BaseMiddleware:\n async def __call__(\n self, request: AsyncRequest, get_response: typing.Callable\n ) -> AsyncResponse:\n raise NotImplementedError # pragma: no cover\n\n\nclass BasicAuthMiddleware(BaseMiddleware):\n def __init__(\n self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]\n ):\n if isinstance(username, str):\n username = username.encode(\"latin1\")\n\n if isinstance(password, str):\n password = password.encode(\"latin1\")\n\n userpass = b\":\".join((username, password))\n token = b64encode(userpass).decode().strip()\n\n self.authorization_header = f\"Basic {token}\"\n\n async def __call__(\n self, request: AsyncRequest, get_response: typing.Callable\n ) -> AsyncResponse:\n request.headers[\"Authorization\"] = self.authorization_header\n return await get_response(request)\n\n\nclass CustomAuthMiddleware(BaseMiddleware):\n def __init__(self, auth: typing.Callable[[AsyncRequest], AsyncRequest]):\n self.auth = auth\n\n async def __call__(\n self, request: AsyncRequest, get_response: typing.Callable\n ) -> AsyncResponse:\n request = self.auth(request)\n return await get_response(request)\n\n\nclass RedirectMiddleware(BaseMiddleware):\n def __init__(\n self,\n allow_redirects: bool = True,\n max_redirects: int = DEFAULT_MAX_REDIRECTS,\n cookies: typing.Optional[Cookies] = None,\n ):\n self.allow_redirects = allow_redirects\n self.max_redirects = max_redirects\n self.cookies = cookies\n self.history: typing.List[AsyncResponse] = []\n\n async def __call__(\n self, request: AsyncRequest, get_response: typing.Callable\n ) -> AsyncResponse:\n if len(self.history) > self.max_redirects:\n raise TooManyRedirects()\n if request.url in (response.url for response in self.history):\n raise RedirectLoop()\n\n response = await get_response(request)\n response.history = list(self.history)\n\n if not response.is_redirect:\n return response\n\n self.history.append(response)\n next_request = self.build_redirect_request(request, response)\n\n if self.allow_redirects:\n return await self(next_request, get_response)\n\n response.call_next = functools.partial(self, next_request, get_response)\n return response\n\n def build_redirect_request(\n self, request: AsyncRequest, response: AsyncResponse\n ) -> AsyncRequest:\n method = self.redirect_method(request, response)\n url = self.redirect_url(request, response)\n headers = self.redirect_headers(request, url, method) # TODO: merge headers?\n content = 
self.redirect_content(request, method)\n cookies = Cookies(self.cookies)\n cookies.update(request.cookies)\n return AsyncRequest(\n method=method, url=url, headers=headers, data=content, cookies=cookies\n )\n\n def redirect_method(self, request: AsyncRequest, response: AsyncResponse) -> str:\n \"\"\"\n When being redirected we may want to change the method of the request\n based on certain specs or browser behavior.\n \"\"\"\n method = request.method\n\n # https://tools.ietf.org/html/rfc7231#section-6.4.4\n if response.status_code == codes.SEE_OTHER and method != \"HEAD\":\n method = \"GET\"\n\n # Do what the browsers do, despite standards...\n # Turn 302s into GETs.\n if response.status_code == codes.FOUND and method != \"HEAD\":\n method = \"GET\"\n\n # If a POST is responded to with a 301, turn it into a GET.\n # This bizarre behaviour is explained in 'requests' issue 1704.\n if response.status_code == codes.MOVED_PERMANENTLY and method == \"POST\":\n method = \"GET\"\n\n return method\n\n def redirect_url(self, request: AsyncRequest, response: AsyncResponse) -> URL:\n \"\"\"\n Return the URL for the redirect to follow.\n \"\"\"\n location = response.headers[\"Location\"]\n\n url = URL(location, allow_relative=True)\n\n # Facilitate relative 'Location' headers, as allowed by RFC 7231.\n # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')\n if url.is_relative_url:\n url = request.url.join(url)\n\n # Attach previous fragment if needed (RFC 7231 7.1.2)\n if request.url.fragment and not url.fragment:\n url = url.copy_with(fragment=request.url.fragment)\n\n return url\n\n def redirect_headers(self, request: AsyncRequest, url: URL, method: str) -> Headers:\n \"\"\"\n Return the headers that should be used for the redirect request.\n \"\"\"\n headers = Headers(request.headers)\n\n if url.origin != request.url.origin:\n # Strip Authorization headers when responses are redirected away from\n # the origin.\n del headers[\"Authorization\"]\n del headers[\"Host\"]\n\n if method != request.method and method == \"GET\":\n # If we've switch to a 'GET' request, then strip any headers which\n # are only relevant to the request body.\n del headers[\"Content-Length\"]\n del headers[\"Transfer-Encoding\"]\n\n return headers\n\n def redirect_content(self, request: AsyncRequest, method: str) -> bytes:\n \"\"\"\n Return the body that should be used for the redirect request.\n \"\"\"\n if method != request.method and method == \"GET\":\n return b\"\"\n if request.is_streaming:\n raise RedirectBodyUnavailable()\n return request.content\n", "path": "httpx/middleware.py"}], "after_files": [{"content": "import functools\nimport typing\nfrom base64 import b64encode\n\nfrom .config import DEFAULT_MAX_REDIRECTS\nfrom .exceptions import RedirectBodyUnavailable, RedirectLoop, TooManyRedirects\nfrom .models import URL, AsyncRequest, AsyncResponse, Cookies, Headers\nfrom .status_codes import codes\n\n\nclass BaseMiddleware:\n async def __call__(\n self, request: AsyncRequest, get_response: typing.Callable\n ) -> AsyncResponse:\n raise NotImplementedError # pragma: no cover\n\n\nclass BasicAuthMiddleware(BaseMiddleware):\n def __init__(\n self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]\n ):\n if isinstance(username, str):\n username = username.encode(\"latin1\")\n\n if isinstance(password, str):\n password = password.encode(\"latin1\")\n\n userpass = b\":\".join((username, password))\n token = b64encode(userpass).decode().strip()\n\n self.authorization_header = 
f\"Basic {token}\"\n\n async def __call__(\n self, request: AsyncRequest, get_response: typing.Callable\n ) -> AsyncResponse:\n request.headers[\"Authorization\"] = self.authorization_header\n return await get_response(request)\n\n\nclass CustomAuthMiddleware(BaseMiddleware):\n def __init__(self, auth: typing.Callable[[AsyncRequest], AsyncRequest]):\n self.auth = auth\n\n async def __call__(\n self, request: AsyncRequest, get_response: typing.Callable\n ) -> AsyncResponse:\n request = self.auth(request)\n return await get_response(request)\n\n\nclass RedirectMiddleware(BaseMiddleware):\n def __init__(\n self,\n allow_redirects: bool = True,\n max_redirects: int = DEFAULT_MAX_REDIRECTS,\n cookies: typing.Optional[Cookies] = None,\n ):\n self.allow_redirects = allow_redirects\n self.max_redirects = max_redirects\n self.cookies = cookies\n self.history: typing.List[AsyncResponse] = []\n\n async def __call__(\n self, request: AsyncRequest, get_response: typing.Callable\n ) -> AsyncResponse:\n if len(self.history) > self.max_redirects:\n raise TooManyRedirects()\n if request.url in (response.url for response in self.history):\n raise RedirectLoop()\n\n response = await get_response(request)\n response.history = list(self.history)\n\n if not response.is_redirect:\n return response\n\n self.history.append(response)\n next_request = self.build_redirect_request(request, response)\n\n if self.allow_redirects:\n return await self(next_request, get_response)\n\n response.call_next = functools.partial(self, next_request, get_response)\n return response\n\n def build_redirect_request(\n self, request: AsyncRequest, response: AsyncResponse\n ) -> AsyncRequest:\n method = self.redirect_method(request, response)\n url = self.redirect_url(request, response)\n headers = self.redirect_headers(request, url, method) # TODO: merge headers?\n content = self.redirect_content(request, method)\n cookies = Cookies(self.cookies)\n cookies.update(request.cookies)\n return AsyncRequest(\n method=method, url=url, headers=headers, data=content, cookies=cookies\n )\n\n def redirect_method(self, request: AsyncRequest, response: AsyncResponse) -> str:\n \"\"\"\n When being redirected we may want to change the method of the request\n based on certain specs or browser behavior.\n \"\"\"\n method = request.method\n\n # https://tools.ietf.org/html/rfc7231#section-6.4.4\n if response.status_code == codes.SEE_OTHER and method != \"HEAD\":\n method = \"GET\"\n\n # Do what the browsers do, despite standards...\n # Turn 302s into GETs.\n if response.status_code == codes.FOUND and method != \"HEAD\":\n method = \"GET\"\n\n # If a POST is responded to with a 301, turn it into a GET.\n # This bizarre behaviour is explained in 'requests' issue 1704.\n if response.status_code == codes.MOVED_PERMANENTLY and method == \"POST\":\n method = \"GET\"\n\n return method\n\n def redirect_url(self, request: AsyncRequest, response: AsyncResponse) -> URL:\n \"\"\"\n Return the URL for the redirect to follow.\n \"\"\"\n location = response.headers[\"Location\"]\n\n url = URL(location, allow_relative=True)\n\n # Facilitate relative 'Location' headers, as allowed by RFC 7231.\n # (e.g. 
'/path/to/resource' instead of 'http://domain.tld/path/to/resource')\n if url.is_relative_url:\n url = request.url.join(url)\n\n # Attach previous fragment if needed (RFC 7231 7.1.2)\n if request.url.fragment and not url.fragment:\n url = url.copy_with(fragment=request.url.fragment)\n\n return url\n\n def redirect_headers(self, request: AsyncRequest, url: URL, method: str) -> Headers:\n \"\"\"\n Return the headers that should be used for the redirect request.\n \"\"\"\n headers = Headers(request.headers)\n\n if url.origin != request.url.origin:\n # Strip Authorization headers when responses are redirected away from\n # the origin.\n del headers[\"Authorization\"]\n headers[\"Host\"] = url.authority\n\n if method != request.method and method == \"GET\":\n # If we've switch to a 'GET' request, then strip any headers which\n # are only relevant to the request body.\n del headers[\"Content-Length\"]\n del headers[\"Transfer-Encoding\"]\n\n return headers\n\n def redirect_content(self, request: AsyncRequest, method: str) -> bytes:\n \"\"\"\n Return the body that should be used for the redirect request.\n \"\"\"\n if method != request.method and method == \"GET\":\n return b\"\"\n if request.is_streaming:\n raise RedirectBodyUnavailable()\n return request.content\n", "path": "httpx/middleware.py"}]} | 2,014 | 118 |
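As a side note, the header handling that the httpx patch in the record above introduces can be sketched with plain dictionaries; the function name and dict-based headers below are illustrative stand-ins, not httpx API:

```python
def redirect_headers(headers: dict, new_authority: str, same_origin: bool) -> dict:
    """Rough stand-in for the patched cross-origin redirect handling."""
    out = dict(headers)
    if not same_origin:
        out.pop("Authorization", None)   # never forward credentials off-origin
        out["Host"] = new_authority      # re-point Host instead of just deleting it
    return out


print(redirect_headers({"Host": "a.example", "Authorization": "secret"},
                       "b.example", same_origin=False))
# -> {'Host': 'b.example'}
```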
gh_patches_debug_3583 | rasdani/github-patches | git_diff | frappe__frappe-14370 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
frappe.db.exists is not available to Server Scripts
**Is your feature request related to a problem? Please describe.**
`frappe.db.exists` is not exposed to **Server Scripts**.
**Describe alternatives you've considered**
Currently I am using `frappe.get_all` with `filters` and `limit=1` to check for existence.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/utils/safe_exec.py`
Content:
```
1
2 import os, json, inspect
3 import mimetypes
4 from html2text import html2text
5 from RestrictedPython import compile_restricted, safe_globals
6 import RestrictedPython.Guards
7 import frappe
8 from frappe import _
9 import frappe.utils
10 import frappe.utils.data
11 from frappe.website.utils import (get_shade, get_toc, get_next_link)
12 from frappe.modules import scrub
13 from frappe.www.printview import get_visible_columns
14 import frappe.exceptions
15 import frappe.integrations.utils
16 from frappe.frappeclient import FrappeClient
17
18 class ServerScriptNotEnabled(frappe.PermissionError):
19 pass
20
21 class NamespaceDict(frappe._dict):
22 """Raise AttributeError if function not found in namespace"""
23 def __getattr__(self, key):
24 ret = self.get(key)
25 if (not ret and key.startswith("__")) or (key not in self):
26 def default_function(*args, **kwargs):
27 raise AttributeError(f"module has no attribute '{key}'")
28 return default_function
29 return ret
30
31
32 def safe_exec(script, _globals=None, _locals=None):
33 # server scripts can be disabled via site_config.json
34 # they are enabled by default
35 if 'server_script_enabled' in frappe.conf:
36 enabled = frappe.conf.server_script_enabled
37 else:
38 enabled = True
39
40 if not enabled:
41 frappe.throw(_('Please Enable Server Scripts'), ServerScriptNotEnabled)
42
43 # build globals
44 exec_globals = get_safe_globals()
45 if _globals:
46 exec_globals.update(_globals)
47
48 # execute script compiled by RestrictedPython
49 exec(compile_restricted(script), exec_globals, _locals) # pylint: disable=exec-used
50
51 return exec_globals, _locals
52
53 def get_safe_globals():
54 datautils = frappe._dict()
55 if frappe.db:
56 date_format = frappe.db.get_default("date_format") or "yyyy-mm-dd"
57 time_format = frappe.db.get_default("time_format") or "HH:mm:ss"
58 else:
59 date_format = "yyyy-mm-dd"
60 time_format = "HH:mm:ss"
61
62 add_data_utils(datautils)
63
64 if "_" in getattr(frappe.local, 'form_dict', {}):
65 del frappe.local.form_dict["_"]
66
67 user = getattr(frappe.local, "session", None) and frappe.local.session.user or "Guest"
68
69 out = NamespaceDict(
70 # make available limited methods of frappe
71 json=NamespaceDict(
72 loads = json.loads,
73 dumps = json.dumps),
74 dict=dict,
75 log=frappe.log,
76 _dict=frappe._dict,
77 frappe=NamespaceDict(
78 flags=frappe._dict(),
79 format=frappe.format_value,
80 format_value=frappe.format_value,
81 date_format=date_format,
82 time_format=time_format,
83 format_date=frappe.utils.data.global_date_format,
84 form_dict=getattr(frappe.local, 'form_dict', {}),
85 bold=frappe.bold,
86 copy_doc=frappe.copy_doc,
87 errprint=frappe.errprint,
88
89 get_meta=frappe.get_meta,
90 get_doc=frappe.get_doc,
91 get_cached_doc=frappe.get_cached_doc,
92 get_list=frappe.get_list,
93 get_all=frappe.get_all,
94 get_system_settings=frappe.get_system_settings,
95 rename_doc=frappe.rename_doc,
96
97 utils=datautils,
98 get_url=frappe.utils.get_url,
99 render_template=frappe.render_template,
100 msgprint=frappe.msgprint,
101 throw=frappe.throw,
102 sendmail = frappe.sendmail,
103 get_print = frappe.get_print,
104 attach_print = frappe.attach_print,
105
106 user=user,
107 get_fullname=frappe.utils.get_fullname,
108 get_gravatar=frappe.utils.get_gravatar_url,
109 full_name=frappe.local.session.data.full_name if getattr(frappe.local, "session", None) else "Guest",
110 request=getattr(frappe.local, 'request', {}),
111 session=frappe._dict(
112 user=user,
113 csrf_token=frappe.local.session.data.csrf_token if getattr(frappe.local, "session", None) else ''
114 ),
115 make_get_request = frappe.integrations.utils.make_get_request,
116 make_post_request = frappe.integrations.utils.make_post_request,
117 socketio_port=frappe.conf.socketio_port,
118 get_hooks=frappe.get_hooks,
119 sanitize_html=frappe.utils.sanitize_html,
120 log_error=frappe.log_error
121 ),
122 FrappeClient=FrappeClient,
123 style=frappe._dict(
124 border_color='#d1d8dd'
125 ),
126 get_toc=get_toc,
127 get_next_link=get_next_link,
128 _=frappe._,
129 get_shade=get_shade,
130 scrub=scrub,
131 guess_mimetype=mimetypes.guess_type,
132 html2text=html2text,
133 dev_server=1 if frappe._dev_server else 0,
134 run_script=run_script
135 )
136
137 add_module_properties(frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception))
138
139 if not frappe.flags.in_setup_help:
140 out.get_visible_columns = get_visible_columns
141 out.frappe.date_format = date_format
142 out.frappe.time_format = time_format
143 out.frappe.db = NamespaceDict(
144 get_list = frappe.get_list,
145 get_all = frappe.get_all,
146 get_value = frappe.db.get_value,
147 set_value = frappe.db.set_value,
148 get_single_value = frappe.db.get_single_value,
149 get_default = frappe.db.get_default,
150 count = frappe.db.count,
151 min = frappe.db.min,
152 max = frappe.db.max,
153 avg = frappe.db.avg,
154 sum = frappe.db.sum,
155 escape = frappe.db.escape,
156 sql = read_sql
157 )
158
159 if frappe.response:
160 out.frappe.response = frappe.response
161
162 out.update(safe_globals)
163
164 # default writer allows write access
165 out._write_ = _write
166 out._getitem_ = _getitem
167 out._getattr_ = _getattr
168
169 # allow iterators and list comprehension
170 out._getiter_ = iter
171 out._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence
172 out.sorted = sorted
173
174 return out
175
176 def read_sql(query, *args, **kwargs):
177 '''a wrapper for frappe.db.sql to allow reads'''
178 if query.strip().split(None, 1)[0].lower() == 'select':
179 return frappe.db.sql(query, *args, **kwargs)
180 else:
181 raise frappe.PermissionError('Only SELECT SQL allowed in scripting')
182
183 def run_script(script):
184 '''run another server script'''
185 return frappe.get_doc('Server Script', script).execute_method()
186
187 def _getitem(obj, key):
188 # guard function for RestrictedPython
189 # allow any key to be accessed as long as it does not start with underscore
190 if isinstance(key, str) and key.startswith('_'):
191 raise SyntaxError('Key starts with _')
192 return obj[key]
193
194 def _getattr(object, name, default=None):
195 # guard function for RestrictedPython
196 # allow any key to be accessed as long as
197 # 1. it does not start with an underscore (safer_getattr)
198 # 2. it is not an UNSAFE_ATTRIBUTES
199
200 UNSAFE_ATTRIBUTES = {
201 # Generator Attributes
202 "gi_frame", "gi_code",
203 # Coroutine Attributes
204 "cr_frame", "cr_code", "cr_origin",
205 # Async Generator Attributes
206 "ag_code", "ag_frame",
207 # Traceback Attributes
208 "tb_frame", "tb_next",
209 }
210
211 if isinstance(name, str) and (name in UNSAFE_ATTRIBUTES):
212 raise SyntaxError("{name} is an unsafe attribute".format(name=name))
213 return RestrictedPython.Guards.safer_getattr(object, name, default=default)
214
215 def _write(obj):
216 # guard function for RestrictedPython
217 # allow writing to any object
218 return obj
219
220 def add_data_utils(data):
221 for key, obj in frappe.utils.data.__dict__.items():
222 if key in VALID_UTILS:
223 data[key] = obj
224
225 def add_module_properties(module, data, filter_method):
226 for key, obj in module.__dict__.items():
227 if key.startswith("_"):
228 # ignore
229 continue
230
231 if filter_method(obj):
232 # only allow functions
233 data[key] = obj
234
235 VALID_UTILS = (
236 "DATE_FORMAT",
237 "TIME_FORMAT",
238 "DATETIME_FORMAT",
239 "is_invalid_date_string",
240 "getdate",
241 "get_datetime",
242 "to_timedelta",
243 "get_timedelta",
244 "add_to_date",
245 "add_days",
246 "add_months",
247 "add_years",
248 "date_diff",
249 "month_diff",
250 "time_diff",
251 "time_diff_in_seconds",
252 "time_diff_in_hours",
253 "now_datetime",
254 "get_timestamp",
255 "get_eta",
256 "get_time_zone",
257 "convert_utc_to_user_timezone",
258 "now",
259 "nowdate",
260 "today",
261 "nowtime",
262 "get_first_day",
263 "get_quarter_start",
264 "get_first_day_of_week",
265 "get_year_start",
266 "get_last_day_of_week",
267 "get_last_day",
268 "get_time",
269 "get_datetime_in_timezone",
270 "get_datetime_str",
271 "get_date_str",
272 "get_time_str",
273 "get_user_date_format",
274 "get_user_time_format",
275 "format_date",
276 "format_time",
277 "format_datetime",
278 "format_duration",
279 "get_weekdays",
280 "get_weekday",
281 "get_timespan_date_range",
282 "global_date_format",
283 "has_common",
284 "flt",
285 "cint",
286 "floor",
287 "ceil",
288 "cstr",
289 "rounded",
290 "remainder",
291 "safe_div",
292 "round_based_on_smallest_currency_fraction",
293 "encode",
294 "parse_val",
295 "fmt_money",
296 "get_number_format_info",
297 "money_in_words",
298 "in_words",
299 "is_html",
300 "is_image",
301 "get_thumbnail_base64_for_image",
302 "image_to_base64",
303 "strip_html",
304 "escape_html",
305 "pretty_date",
306 "comma_or",
307 "comma_and",
308 "comma_sep",
309 "new_line_sep",
310 "filter_strip_join",
311 "get_url",
312 "get_host_name_from_request",
313 "url_contains_port",
314 "get_host_name",
315 "get_link_to_form",
316 "get_link_to_report",
317 "get_absolute_url",
318 "get_url_to_form",
319 "get_url_to_list",
320 "get_url_to_report",
321 "get_url_to_report_with_filters",
322 "evaluate_filters",
323 "compare",
324 "get_filter",
325 "make_filter_tuple",
326 "make_filter_dict",
327 "sanitize_column",
328 "scrub_urls",
329 "expand_relative_urls",
330 "quoted",
331 "quote_urls",
332 "unique",
333 "strip",
334 "to_markdown",
335 "md_to_html",
336 "markdown",
337 "is_subset",
338 "generate_hash",
339 "formatdate",
340 "get_user_info_for_avatar",
341 "get_abbr"
342 )
343
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frappe/utils/safe_exec.py b/frappe/utils/safe_exec.py
--- a/frappe/utils/safe_exec.py
+++ b/frappe/utils/safe_exec.py
@@ -147,6 +147,7 @@
set_value = frappe.db.set_value,
get_single_value = frappe.db.get_single_value,
get_default = frappe.db.get_default,
+ exists = frappe.db.exists,
count = frappe.db.count,
min = frappe.db.min,
max = frappe.db.max,
| {"golden_diff": "diff --git a/frappe/utils/safe_exec.py b/frappe/utils/safe_exec.py\n--- a/frappe/utils/safe_exec.py\n+++ b/frappe/utils/safe_exec.py\n@@ -147,6 +147,7 @@\n \t\t\tset_value = frappe.db.set_value,\n \t\t\tget_single_value = frappe.db.get_single_value,\n \t\t\tget_default = frappe.db.get_default,\n+\t\t\texists = frappe.db.exists,\n \t\t\tcount = frappe.db.count,\n \t\t\tmin = frappe.db.min,\n \t\t\tmax = frappe.db.max,\n", "issue": "frappe.db.exists is not available to Server Scripts\n**Is your feature request related to a problem? Please describe.**\r\n`frappe.db.exists` is not exposed to **Server Scripts**.\r\n\r\n**Describe alternatives you've considered**\r\nCurrently I am using `frappe.get_all` with `filters` and `limit=1` to check for existence.\r\n\n", "before_files": [{"content": "\nimport os, json, inspect\nimport mimetypes\nfrom html2text import html2text\nfrom RestrictedPython import compile_restricted, safe_globals\nimport RestrictedPython.Guards\nimport frappe\nfrom frappe import _\nimport frappe.utils\nimport frappe.utils.data\nfrom frappe.website.utils import (get_shade, get_toc, get_next_link)\nfrom frappe.modules import scrub\nfrom frappe.www.printview import get_visible_columns\nimport frappe.exceptions\nimport frappe.integrations.utils\nfrom frappe.frappeclient import FrappeClient\n\nclass ServerScriptNotEnabled(frappe.PermissionError):\n\tpass\n\nclass NamespaceDict(frappe._dict):\n\t\"\"\"Raise AttributeError if function not found in namespace\"\"\"\n\tdef __getattr__(self, key):\n\t\tret = self.get(key)\n\t\tif (not ret and key.startswith(\"__\")) or (key not in self):\n\t\t\tdef default_function(*args, **kwargs):\n\t\t\t\traise AttributeError(f\"module has no attribute '{key}'\")\n\t\t\treturn default_function\n\t\treturn ret\n\n\ndef safe_exec(script, _globals=None, _locals=None):\n\t# server scripts can be disabled via site_config.json\n\t# they are enabled by default\n\tif 'server_script_enabled' in frappe.conf:\n\t\tenabled = frappe.conf.server_script_enabled\n\telse:\n\t\tenabled = True\n\n\tif not enabled:\n\t\tfrappe.throw(_('Please Enable Server Scripts'), ServerScriptNotEnabled)\n\n\t# build globals\n\texec_globals = get_safe_globals()\n\tif _globals:\n\t\texec_globals.update(_globals)\n\n\t# execute script compiled by RestrictedPython\n\texec(compile_restricted(script), exec_globals, _locals) # pylint: disable=exec-used\n\n\treturn exec_globals, _locals\n\ndef get_safe_globals():\n\tdatautils = frappe._dict()\n\tif frappe.db:\n\t\tdate_format = frappe.db.get_default(\"date_format\") or \"yyyy-mm-dd\"\n\t\ttime_format = frappe.db.get_default(\"time_format\") or \"HH:mm:ss\"\n\telse:\n\t\tdate_format = \"yyyy-mm-dd\"\n\t\ttime_format = \"HH:mm:ss\"\n\n\tadd_data_utils(datautils)\n\n\tif \"_\" in getattr(frappe.local, 'form_dict', {}):\n\t\tdel frappe.local.form_dict[\"_\"]\n\n\tuser = getattr(frappe.local, \"session\", None) and frappe.local.session.user or \"Guest\"\n\n\tout = NamespaceDict(\n\t\t# make available limited methods of frappe\n\t\tjson=NamespaceDict(\n\t\t\tloads = json.loads,\n\t\t\tdumps = json.dumps),\n\t\tdict=dict,\n\t\tlog=frappe.log,\n\t\t_dict=frappe._dict,\n\t\tfrappe=NamespaceDict(\n\t\t\tflags=frappe._dict(),\n\t\t\tformat=frappe.format_value,\n\t\t\tformat_value=frappe.format_value,\n\t\t\tdate_format=date_format,\n\t\t\ttime_format=time_format,\n\t\t\tformat_date=frappe.utils.data.global_date_format,\n\t\t\tform_dict=getattr(frappe.local, 'form_dict', 
{}),\n\t\t\tbold=frappe.bold,\n\t\t\tcopy_doc=frappe.copy_doc,\n\t\t\terrprint=frappe.errprint,\n\n\t\t\tget_meta=frappe.get_meta,\n\t\t\tget_doc=frappe.get_doc,\n\t\t\tget_cached_doc=frappe.get_cached_doc,\n\t\t\tget_list=frappe.get_list,\n\t\t\tget_all=frappe.get_all,\n\t\t\tget_system_settings=frappe.get_system_settings,\n\t\t\trename_doc=frappe.rename_doc,\n\n\t\t\tutils=datautils,\n\t\t\tget_url=frappe.utils.get_url,\n\t\t\trender_template=frappe.render_template,\n\t\t\tmsgprint=frappe.msgprint,\n\t\t\tthrow=frappe.throw,\n\t\t\tsendmail = frappe.sendmail,\n\t\t\tget_print = frappe.get_print,\n\t\t\tattach_print = frappe.attach_print,\n\n\t\t\tuser=user,\n\t\t\tget_fullname=frappe.utils.get_fullname,\n\t\t\tget_gravatar=frappe.utils.get_gravatar_url,\n\t\t\tfull_name=frappe.local.session.data.full_name if getattr(frappe.local, \"session\", None) else \"Guest\",\n\t\t\trequest=getattr(frappe.local, 'request', {}),\n\t\t\tsession=frappe._dict(\n\t\t\t\tuser=user,\n\t\t\t\tcsrf_token=frappe.local.session.data.csrf_token if getattr(frappe.local, \"session\", None) else ''\n\t\t\t),\n\t\t\tmake_get_request = frappe.integrations.utils.make_get_request,\n\t\t\tmake_post_request = frappe.integrations.utils.make_post_request,\n\t\t\tsocketio_port=frappe.conf.socketio_port,\n\t\t\tget_hooks=frappe.get_hooks,\n\t\t\tsanitize_html=frappe.utils.sanitize_html,\n\t\t\tlog_error=frappe.log_error\n\t\t),\n\t\tFrappeClient=FrappeClient,\n\t\tstyle=frappe._dict(\n\t\t\tborder_color='#d1d8dd'\n\t\t),\n\t\tget_toc=get_toc,\n\t\tget_next_link=get_next_link,\n\t\t_=frappe._,\n\t\tget_shade=get_shade,\n\t\tscrub=scrub,\n\t\tguess_mimetype=mimetypes.guess_type,\n\t\thtml2text=html2text,\n\t\tdev_server=1 if frappe._dev_server else 0,\n\t\trun_script=run_script\n\t)\n\n\tadd_module_properties(frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception))\n\n\tif not frappe.flags.in_setup_help:\n\t\tout.get_visible_columns = get_visible_columns\n\t\tout.frappe.date_format = date_format\n\t\tout.frappe.time_format = time_format\n\t\tout.frappe.db = NamespaceDict(\n\t\t\tget_list = frappe.get_list,\n\t\t\tget_all = frappe.get_all,\n\t\t\tget_value = frappe.db.get_value,\n\t\t\tset_value = frappe.db.set_value,\n\t\t\tget_single_value = frappe.db.get_single_value,\n\t\t\tget_default = frappe.db.get_default,\n\t\t\tcount = frappe.db.count,\n\t\t\tmin = frappe.db.min,\n\t\t\tmax = frappe.db.max,\n\t\t\tavg = frappe.db.avg,\n\t\t\tsum = frappe.db.sum,\n\t\t\tescape = frappe.db.escape,\n\t\t\tsql = read_sql\n\t\t)\n\n\tif frappe.response:\n\t\tout.frappe.response = frappe.response\n\n\tout.update(safe_globals)\n\n\t# default writer allows write access\n\tout._write_ = _write\n\tout._getitem_ = _getitem\n\tout._getattr_ = _getattr\n\n\t# allow iterators and list comprehension\n\tout._getiter_ = iter\n\tout._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence\n\tout.sorted = sorted\n\n\treturn out\n\ndef read_sql(query, *args, **kwargs):\n\t'''a wrapper for frappe.db.sql to allow reads'''\n\tif query.strip().split(None, 1)[0].lower() == 'select':\n\t\treturn frappe.db.sql(query, *args, **kwargs)\n\telse:\n\t\traise frappe.PermissionError('Only SELECT SQL allowed in scripting')\n\ndef run_script(script):\n\t'''run another server script'''\n\treturn frappe.get_doc('Server Script', script).execute_method()\n\ndef _getitem(obj, key):\n\t# guard function for RestrictedPython\n\t# allow any key to be accessed as long as it does not start with underscore\n\tif 
isinstance(key, str) and key.startswith('_'):\n\t\traise SyntaxError('Key starts with _')\n\treturn obj[key]\n\ndef _getattr(object, name, default=None):\n\t# guard function for RestrictedPython\n\t# allow any key to be accessed as long as\n\t# 1. it does not start with an underscore (safer_getattr)\n\t# 2. it is not an UNSAFE_ATTRIBUTES\n\n\tUNSAFE_ATTRIBUTES = {\n\t\t# Generator Attributes\n\t\t\"gi_frame\", \"gi_code\",\n\t\t# Coroutine Attributes\n\t\t\"cr_frame\", \"cr_code\", \"cr_origin\",\n\t\t# Async Generator Attributes\n\t\t\"ag_code\", \"ag_frame\",\n\t\t# Traceback Attributes\n\t\t\"tb_frame\", \"tb_next\",\n\t}\n\n\tif isinstance(name, str) and (name in UNSAFE_ATTRIBUTES):\n\t\traise SyntaxError(\"{name} is an unsafe attribute\".format(name=name))\n\treturn RestrictedPython.Guards.safer_getattr(object, name, default=default)\n\ndef _write(obj):\n\t# guard function for RestrictedPython\n\t# allow writing to any object\n\treturn obj\n\ndef add_data_utils(data):\n\tfor key, obj in frappe.utils.data.__dict__.items():\n\t\tif key in VALID_UTILS:\n\t\t\tdata[key] = obj\n\ndef add_module_properties(module, data, filter_method):\n\tfor key, obj in module.__dict__.items():\n\t\tif key.startswith(\"_\"):\n\t\t\t# ignore\n\t\t\tcontinue\n\n\t\tif filter_method(obj):\n\t\t\t# only allow functions\n\t\t\tdata[key] = obj\n\nVALID_UTILS = (\n\"DATE_FORMAT\",\n\"TIME_FORMAT\",\n\"DATETIME_FORMAT\",\n\"is_invalid_date_string\",\n\"getdate\",\n\"get_datetime\",\n\"to_timedelta\",\n\"get_timedelta\",\n\"add_to_date\",\n\"add_days\",\n\"add_months\",\n\"add_years\",\n\"date_diff\",\n\"month_diff\",\n\"time_diff\",\n\"time_diff_in_seconds\",\n\"time_diff_in_hours\",\n\"now_datetime\",\n\"get_timestamp\",\n\"get_eta\",\n\"get_time_zone\",\n\"convert_utc_to_user_timezone\",\n\"now\",\n\"nowdate\",\n\"today\",\n\"nowtime\",\n\"get_first_day\",\n\"get_quarter_start\",\n\"get_first_day_of_week\",\n\"get_year_start\",\n\"get_last_day_of_week\",\n\"get_last_day\",\n\"get_time\",\n\"get_datetime_in_timezone\",\n\"get_datetime_str\",\n\"get_date_str\",\n\"get_time_str\",\n\"get_user_date_format\",\n\"get_user_time_format\",\n\"format_date\",\n\"format_time\",\n\"format_datetime\",\n\"format_duration\",\n\"get_weekdays\",\n\"get_weekday\",\n\"get_timespan_date_range\",\n\"global_date_format\",\n\"has_common\",\n\"flt\",\n\"cint\",\n\"floor\",\n\"ceil\",\n\"cstr\",\n\"rounded\",\n\"remainder\",\n\"safe_div\",\n\"round_based_on_smallest_currency_fraction\",\n\"encode\",\n\"parse_val\",\n\"fmt_money\",\n\"get_number_format_info\",\n\"money_in_words\",\n\"in_words\",\n\"is_html\",\n\"is_image\",\n\"get_thumbnail_base64_for_image\",\n\"image_to_base64\",\n\"strip_html\",\n\"escape_html\",\n\"pretty_date\",\n\"comma_or\",\n\"comma_and\",\n\"comma_sep\",\n\"new_line_sep\",\n\"filter_strip_join\",\n\"get_url\",\n\"get_host_name_from_request\",\n\"url_contains_port\",\n\"get_host_name\",\n\"get_link_to_form\",\n\"get_link_to_report\",\n\"get_absolute_url\",\n\"get_url_to_form\",\n\"get_url_to_list\",\n\"get_url_to_report\",\n\"get_url_to_report_with_filters\",\n\"evaluate_filters\",\n\"compare\",\n\"get_filter\",\n\"make_filter_tuple\",\n\"make_filter_dict\",\n\"sanitize_column\",\n\"scrub_urls\",\n\"expand_relative_urls\",\n\"quoted\",\n\"quote_urls\",\n\"unique\",\n\"strip\",\n\"to_markdown\",\n\"md_to_html\",\n\"markdown\",\n\"is_subset\",\n\"generate_hash\",\n\"formatdate\",\n\"get_user_info_for_avatar\",\n\"get_abbr\"\n)\n", "path": "frappe/utils/safe_exec.py"}], "after_files": [{"content": "\nimport os, json, 
inspect\nimport mimetypes\nfrom html2text import html2text\nfrom RestrictedPython import compile_restricted, safe_globals\nimport RestrictedPython.Guards\nimport frappe\nfrom frappe import _\nimport frappe.utils\nimport frappe.utils.data\nfrom frappe.website.utils import (get_shade, get_toc, get_next_link)\nfrom frappe.modules import scrub\nfrom frappe.www.printview import get_visible_columns\nimport frappe.exceptions\nimport frappe.integrations.utils\nfrom frappe.frappeclient import FrappeClient\n\nclass ServerScriptNotEnabled(frappe.PermissionError):\n\tpass\n\nclass NamespaceDict(frappe._dict):\n\t\"\"\"Raise AttributeError if function not found in namespace\"\"\"\n\tdef __getattr__(self, key):\n\t\tret = self.get(key)\n\t\tif (not ret and key.startswith(\"__\")) or (key not in self):\n\t\t\tdef default_function(*args, **kwargs):\n\t\t\t\traise AttributeError(f\"module has no attribute '{key}'\")\n\t\t\treturn default_function\n\t\treturn ret\n\n\ndef safe_exec(script, _globals=None, _locals=None):\n\t# server scripts can be disabled via site_config.json\n\t# they are enabled by default\n\tif 'server_script_enabled' in frappe.conf:\n\t\tenabled = frappe.conf.server_script_enabled\n\telse:\n\t\tenabled = True\n\n\tif not enabled:\n\t\tfrappe.throw(_('Please Enable Server Scripts'), ServerScriptNotEnabled)\n\n\t# build globals\n\texec_globals = get_safe_globals()\n\tif _globals:\n\t\texec_globals.update(_globals)\n\n\t# execute script compiled by RestrictedPython\n\texec(compile_restricted(script), exec_globals, _locals) # pylint: disable=exec-used\n\n\treturn exec_globals, _locals\n\ndef get_safe_globals():\n\tdatautils = frappe._dict()\n\tif frappe.db:\n\t\tdate_format = frappe.db.get_default(\"date_format\") or \"yyyy-mm-dd\"\n\t\ttime_format = frappe.db.get_default(\"time_format\") or \"HH:mm:ss\"\n\telse:\n\t\tdate_format = \"yyyy-mm-dd\"\n\t\ttime_format = \"HH:mm:ss\"\n\n\tadd_data_utils(datautils)\n\n\tif \"_\" in getattr(frappe.local, 'form_dict', {}):\n\t\tdel frappe.local.form_dict[\"_\"]\n\n\tuser = getattr(frappe.local, \"session\", None) and frappe.local.session.user or \"Guest\"\n\n\tout = NamespaceDict(\n\t\t# make available limited methods of frappe\n\t\tjson=NamespaceDict(\n\t\t\tloads = json.loads,\n\t\t\tdumps = json.dumps),\n\t\tdict=dict,\n\t\tlog=frappe.log,\n\t\t_dict=frappe._dict,\n\t\tfrappe=NamespaceDict(\n\t\t\tflags=frappe._dict(),\n\t\t\tformat=frappe.format_value,\n\t\t\tformat_value=frappe.format_value,\n\t\t\tdate_format=date_format,\n\t\t\ttime_format=time_format,\n\t\t\tformat_date=frappe.utils.data.global_date_format,\n\t\t\tform_dict=getattr(frappe.local, 'form_dict', {}),\n\t\t\tbold=frappe.bold,\n\t\t\tcopy_doc=frappe.copy_doc,\n\t\t\terrprint=frappe.errprint,\n\n\t\t\tget_meta=frappe.get_meta,\n\t\t\tget_doc=frappe.get_doc,\n\t\t\tget_cached_doc=frappe.get_cached_doc,\n\t\t\tget_list=frappe.get_list,\n\t\t\tget_all=frappe.get_all,\n\t\t\tget_system_settings=frappe.get_system_settings,\n\t\t\trename_doc=frappe.rename_doc,\n\n\t\t\tutils=datautils,\n\t\t\tget_url=frappe.utils.get_url,\n\t\t\trender_template=frappe.render_template,\n\t\t\tmsgprint=frappe.msgprint,\n\t\t\tthrow=frappe.throw,\n\t\t\tsendmail = frappe.sendmail,\n\t\t\tget_print = frappe.get_print,\n\t\t\tattach_print = frappe.attach_print,\n\n\t\t\tuser=user,\n\t\t\tget_fullname=frappe.utils.get_fullname,\n\t\t\tget_gravatar=frappe.utils.get_gravatar_url,\n\t\t\tfull_name=frappe.local.session.data.full_name if getattr(frappe.local, \"session\", None) else 
\"Guest\",\n\t\t\trequest=getattr(frappe.local, 'request', {}),\n\t\t\tsession=frappe._dict(\n\t\t\t\tuser=user,\n\t\t\t\tcsrf_token=frappe.local.session.data.csrf_token if getattr(frappe.local, \"session\", None) else ''\n\t\t\t),\n\t\t\tmake_get_request = frappe.integrations.utils.make_get_request,\n\t\t\tmake_post_request = frappe.integrations.utils.make_post_request,\n\t\t\tsocketio_port=frappe.conf.socketio_port,\n\t\t\tget_hooks=frappe.get_hooks,\n\t\t\tsanitize_html=frappe.utils.sanitize_html,\n\t\t\tlog_error=frappe.log_error\n\t\t),\n\t\tFrappeClient=FrappeClient,\n\t\tstyle=frappe._dict(\n\t\t\tborder_color='#d1d8dd'\n\t\t),\n\t\tget_toc=get_toc,\n\t\tget_next_link=get_next_link,\n\t\t_=frappe._,\n\t\tget_shade=get_shade,\n\t\tscrub=scrub,\n\t\tguess_mimetype=mimetypes.guess_type,\n\t\thtml2text=html2text,\n\t\tdev_server=1 if frappe._dev_server else 0,\n\t\trun_script=run_script\n\t)\n\n\tadd_module_properties(frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception))\n\n\tif not frappe.flags.in_setup_help:\n\t\tout.get_visible_columns = get_visible_columns\n\t\tout.frappe.date_format = date_format\n\t\tout.frappe.time_format = time_format\n\t\tout.frappe.db = NamespaceDict(\n\t\t\tget_list = frappe.get_list,\n\t\t\tget_all = frappe.get_all,\n\t\t\tget_value = frappe.db.get_value,\n\t\t\tset_value = frappe.db.set_value,\n\t\t\tget_single_value = frappe.db.get_single_value,\n\t\t\tget_default = frappe.db.get_default,\n\t\t\texists = frappe.db.exists,\n\t\t\tcount = frappe.db.count,\n\t\t\tmin = frappe.db.min,\n\t\t\tmax = frappe.db.max,\n\t\t\tavg = frappe.db.avg,\n\t\t\tsum = frappe.db.sum,\n\t\t\tescape = frappe.db.escape,\n\t\t\tsql = read_sql\n\t\t)\n\n\tif frappe.response:\n\t\tout.frappe.response = frappe.response\n\n\tout.update(safe_globals)\n\n\t# default writer allows write access\n\tout._write_ = _write\n\tout._getitem_ = _getitem\n\tout._getattr_ = _getattr\n\n\t# allow iterators and list comprehension\n\tout._getiter_ = iter\n\tout._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence\n\tout.sorted = sorted\n\n\treturn out\n\ndef read_sql(query, *args, **kwargs):\n\t'''a wrapper for frappe.db.sql to allow reads'''\n\tif query.strip().split(None, 1)[0].lower() == 'select':\n\t\treturn frappe.db.sql(query, *args, **kwargs)\n\telse:\n\t\traise frappe.PermissionError('Only SELECT SQL allowed in scripting')\n\ndef run_script(script):\n\t'''run another server script'''\n\treturn frappe.get_doc('Server Script', script).execute_method()\n\ndef _getitem(obj, key):\n\t# guard function for RestrictedPython\n\t# allow any key to be accessed as long as it does not start with underscore\n\tif isinstance(key, str) and key.startswith('_'):\n\t\traise SyntaxError('Key starts with _')\n\treturn obj[key]\n\ndef _getattr(object, name, default=None):\n\t# guard function for RestrictedPython\n\t# allow any key to be accessed as long as\n\t# 1. it does not start with an underscore (safer_getattr)\n\t# 2. 
it is not an UNSAFE_ATTRIBUTES\n\n\tUNSAFE_ATTRIBUTES = {\n\t\t# Generator Attributes\n\t\t\"gi_frame\", \"gi_code\",\n\t\t# Coroutine Attributes\n\t\t\"cr_frame\", \"cr_code\", \"cr_origin\",\n\t\t# Async Generator Attributes\n\t\t\"ag_code\", \"ag_frame\",\n\t\t# Traceback Attributes\n\t\t\"tb_frame\", \"tb_next\",\n\t}\n\n\tif isinstance(name, str) and (name in UNSAFE_ATTRIBUTES):\n\t\traise SyntaxError(\"{name} is an unsafe attribute\".format(name=name))\n\treturn RestrictedPython.Guards.safer_getattr(object, name, default=default)\n\ndef _write(obj):\n\t# guard function for RestrictedPython\n\t# allow writing to any object\n\treturn obj\n\ndef add_data_utils(data):\n\tfor key, obj in frappe.utils.data.__dict__.items():\n\t\tif key in VALID_UTILS:\n\t\t\tdata[key] = obj\n\ndef add_module_properties(module, data, filter_method):\n\tfor key, obj in module.__dict__.items():\n\t\tif key.startswith(\"_\"):\n\t\t\t# ignore\n\t\t\tcontinue\n\n\t\tif filter_method(obj):\n\t\t\t# only allow functions\n\t\t\tdata[key] = obj\n\nVALID_UTILS = (\n\"DATE_FORMAT\",\n\"TIME_FORMAT\",\n\"DATETIME_FORMAT\",\n\"is_invalid_date_string\",\n\"getdate\",\n\"get_datetime\",\n\"to_timedelta\",\n\"get_timedelta\",\n\"add_to_date\",\n\"add_days\",\n\"add_months\",\n\"add_years\",\n\"date_diff\",\n\"month_diff\",\n\"time_diff\",\n\"time_diff_in_seconds\",\n\"time_diff_in_hours\",\n\"now_datetime\",\n\"get_timestamp\",\n\"get_eta\",\n\"get_time_zone\",\n\"convert_utc_to_user_timezone\",\n\"now\",\n\"nowdate\",\n\"today\",\n\"nowtime\",\n\"get_first_day\",\n\"get_quarter_start\",\n\"get_first_day_of_week\",\n\"get_year_start\",\n\"get_last_day_of_week\",\n\"get_last_day\",\n\"get_time\",\n\"get_datetime_in_timezone\",\n\"get_datetime_str\",\n\"get_date_str\",\n\"get_time_str\",\n\"get_user_date_format\",\n\"get_user_time_format\",\n\"format_date\",\n\"format_time\",\n\"format_datetime\",\n\"format_duration\",\n\"get_weekdays\",\n\"get_weekday\",\n\"get_timespan_date_range\",\n\"global_date_format\",\n\"has_common\",\n\"flt\",\n\"cint\",\n\"floor\",\n\"ceil\",\n\"cstr\",\n\"rounded\",\n\"remainder\",\n\"safe_div\",\n\"round_based_on_smallest_currency_fraction\",\n\"encode\",\n\"parse_val\",\n\"fmt_money\",\n\"get_number_format_info\",\n\"money_in_words\",\n\"in_words\",\n\"is_html\",\n\"is_image\",\n\"get_thumbnail_base64_for_image\",\n\"image_to_base64\",\n\"strip_html\",\n\"escape_html\",\n\"pretty_date\",\n\"comma_or\",\n\"comma_and\",\n\"comma_sep\",\n\"new_line_sep\",\n\"filter_strip_join\",\n\"get_url\",\n\"get_host_name_from_request\",\n\"url_contains_port\",\n\"get_host_name\",\n\"get_link_to_form\",\n\"get_link_to_report\",\n\"get_absolute_url\",\n\"get_url_to_form\",\n\"get_url_to_list\",\n\"get_url_to_report\",\n\"get_url_to_report_with_filters\",\n\"evaluate_filters\",\n\"compare\",\n\"get_filter\",\n\"make_filter_tuple\",\n\"make_filter_dict\",\n\"sanitize_column\",\n\"scrub_urls\",\n\"expand_relative_urls\",\n\"quoted\",\n\"quote_urls\",\n\"unique\",\n\"strip\",\n\"to_markdown\",\n\"md_to_html\",\n\"markdown\",\n\"is_subset\",\n\"generate_hash\",\n\"formatdate\",\n\"get_user_info_for_avatar\",\n\"get_abbr\"\n)\n", "path": "frappe/utils/safe_exec.py"}]} | 3,678 | 120 |
gh_patches_debug_57932 | rasdani/github-patches | git_diff | scrapy__scrapy-3825 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue with Twisted and Python 3.4
Twisted had a patch 3 days ago and it's causing the test suite to fail for the py34 environment. 
Twisted, according to their README, supports Python 3.5+. This needs to be fixed if the builds need to pass.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from os.path import dirname, join
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
7 version = f.read().decode('ascii').strip()
8
9
10 def has_environment_marker_platform_impl_support():
11 """Code extracted from 'pytest/setup.py'
12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
13
14 The first known release to support environment marker with range operators
15 it is 18.5, see:
16 https://setuptools.readthedocs.io/en/latest/history.html#id235
17 """
18 return parse_version(setuptools_version) >= parse_version('18.5')
19
20
21 extras_require = {}
22
23 if has_environment_marker_platform_impl_support():
24 extras_require[':platform_python_implementation == "PyPy"'] = [
25 'PyPyDispatcher>=2.1.0',
26 ]
27
28
29 setup(
30 name='Scrapy',
31 version=version,
32 url='https://scrapy.org',
33 description='A high-level Web Crawling and Web Scraping framework',
34 long_description=open('README.rst').read(),
35 author='Scrapy developers',
36 maintainer='Pablo Hoffman',
37 maintainer_email='[email protected]',
38 license='BSD',
39 packages=find_packages(exclude=('tests', 'tests.*')),
40 include_package_data=True,
41 zip_safe=False,
42 entry_points={
43 'console_scripts': ['scrapy = scrapy.cmdline:execute']
44 },
45 classifiers=[
46 'Framework :: Scrapy',
47 'Development Status :: 5 - Production/Stable',
48 'Environment :: Console',
49 'Intended Audience :: Developers',
50 'License :: OSI Approved :: BSD License',
51 'Operating System :: OS Independent',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.4',
57 'Programming Language :: Python :: 3.5',
58 'Programming Language :: Python :: 3.6',
59 'Programming Language :: Python :: 3.7',
60 'Programming Language :: Python :: Implementation :: CPython',
61 'Programming Language :: Python :: Implementation :: PyPy',
62 'Topic :: Internet :: WWW/HTTP',
63 'Topic :: Software Development :: Libraries :: Application Frameworks',
64 'Topic :: Software Development :: Libraries :: Python Modules',
65 ],
66 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
67 install_requires=[
68 'Twisted>=13.1.0',
69 'w3lib>=1.17.0',
70 'queuelib',
71 'lxml',
72 'pyOpenSSL',
73 'cssselect>=0.9',
74 'six>=1.5.2',
75 'parsel>=1.5',
76 'PyDispatcher>=2.0.5',
77 'service_identity',
78 ],
79 extras_require=extras_require,
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,7 +65,8 @@
],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
install_requires=[
- 'Twisted>=13.1.0',
+ 'Twisted>=13.1.0;python_version!="3.4"',
+ 'Twisted>=13.1.0,<=19.2.0;python_version=="3.4"',
'w3lib>=1.17.0',
'queuelib',
'lxml',
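The environment markers used in the pins above can be sanity-checked outside of pip with the `packaging` library; this is an illustrative check, not part of the patch:

```python
from packaging.markers import Marker

env_py34 = {"python_version": "3.4"}

# The capped Twisted requirement should apply on 3.4 ...
assert Marker('python_version == "3.4"').evaluate(env_py34)
# ... while the uncapped one should be skipped there.
assert not Marker('python_version != "3.4"').evaluate(env_py34)
```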
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,7 +65,8 @@\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n- 'Twisted>=13.1.0',\n+ 'Twisted>=13.1.0;python_version!=\"3.4\"',\n+ 'Twisted>=13.1.0,<=19.2.0;python_version==\"3.4\"',\n 'w3lib>=1.17.0',\n 'queuelib',\n 'lxml',\n", "issue": "Issue with Twisted and Python 3.4 \nTwisted had a patch 3 days ago and it's causing test suite to fail for py34 environment. \r\nTwisted , according to their Readme, support Python 3.5+. This needs to be fixed if the builds need to pass\n", "before_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\nextras_require = {}\n\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n 'Twisted>=13.1.0',\n 'w3lib>=1.17.0',\n 'queuelib',\n 'lxml',\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n 'parsel>=1.5',\n 'PyDispatcher>=2.0.5',\n 'service_identity',\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef 
has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\nextras_require = {}\n\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n 'Twisted>=13.1.0;python_version!=\"3.4\"',\n 'Twisted>=13.1.0,<=19.2.0;python_version==\"3.4\"',\n 'w3lib>=1.17.0',\n 'queuelib',\n 'lxml',\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n 'parsel>=1.5',\n 'PyDispatcher>=2.0.5',\n 'service_identity',\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}]} | 1,152 | 153 |
gh_patches_debug_31720 | rasdani/github-patches | git_diff | meltano__meltano-7115 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow vendor-specific annotations in `meltano.yml`
Discussed in https://github.com/meltano/meltano/discussions/7053
We should update `meltano.schema.json` to permit an object with arbitrary fields (i.e. `"additionalProperties": true`) within any object in the schema that does not already impose restrictions on the permitted additional properties. That is to say:
- If the object has `"additionalProperties": true`, then it can have `annotations`, and no changes are required to the schema.
- If the object has `"additionalProperties": false`, then it can have `annotations` and we must explicitly add it to the properties.
- If the object has `additionalProperties` set to anything else (e.g. limiting it to strings as `env` does), then annotations are not supported, and no change to the schema is necessary.
So the only places in the schema that need to be updated are where it says `"additionalProperties": false`.
Documentation around what this field is for should be added. It should be made clear that the core Meltano library/CLI does not use this field, but it may be used for Meltano Cloud, or third-party tools.
The documentation should also state that we impose no limitation on how it can be used except for requiring that the top-level of each annotation object may only contain objects as properties, and that we recommend that third-party tools keep their annotations under a sensible key, such as the name of the tool.
Testing should be done by adding annotations liberally to integration tests, and ensuring that they are ignored.
--- END ISSUE ---
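For orientation only, the relaxation described in the issue might look roughly like this on a toy sub-schema (a hedged sketch, not the real `meltano.schema.json`):

```python
import jsonschema

plugin_like = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "name": {"type": "string"},
        # newly permitted: arbitrary vendor keys, each mapping to an object
        "annotations": {
            "type": "object",
            "additionalProperties": {"type": "object"},
        },
    },
}

jsonschema.validate(
    {"name": "tap-example", "annotations": {"some-tool": {"owner": "data-team"}}},
    plugin_like,
)
```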
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/meltano_file.py`
Content:
```
1 """Module for working with meltano.yml files."""
2 from __future__ import annotations
3
4 import copy
5 from typing import Iterable
6
7 from meltano.core.behavior.canonical import Canonical
8 from meltano.core.environment import Environment
9 from meltano.core.plugin import PluginType
10 from meltano.core.plugin.project_plugin import ProjectPlugin
11 from meltano.core.schedule import Schedule
12 from meltano.core.task_sets import TaskSets
13
14 VERSION = 1
15
16
17 class MeltanoFile(Canonical):
18 """Data and loading methods for meltano.yml files."""
19
20 def __init__(
21 self,
22 version: int = VERSION,
23 plugins: dict[str, dict] = None,
24 schedules: list[dict] = None,
25 environments: list[dict] = None,
26 jobs: list[dict] = None,
27 env: dict[str, str] = None,
28 **extras,
29 ):
30 """Construct a new MeltanoFile object from meltano.yml file.
31
32 Args:
33 version: The meltano.yml version, currently always 1.
34 plugins: Plugin configuration for this project.
35 schedules: Schedule configuration for this project.
36 environments: Environment configuration for this project.
37 jobs: Job configuration for this project.
38 env: Environment variables for this project.
39 extras: Additional configuration for this project.
40 """
41 super().__init__(
42 # Attributes will be listed in meltano.yml in this order:
43 version=version,
44 extras=extras,
45 plugins=self.load_plugins(plugins or {}),
46 schedules=self.load_schedules(schedules or []),
47 environments=self.load_environments(environments or []),
48 jobs=self.load_job_tasks(jobs or []),
49 env=env or {},
50 )
51
52 def load_plugins(self, plugins: dict[str, dict]) -> Canonical:
53 """Parse the `meltano.yml` file and return it as `ProjectPlugin` instances.
54
55 Args:
56 plugins: Dictionary of plugin configurations.
57
58 Returns:
59 New ProjectPlugin instances.
60 """
61 plugin_type_plugins = Canonical()
62
63 for ptype in PluginType:
64 plugin_type_plugins[ptype] = []
65
66 # this will parse the meltano.yml file and create an instance of the
67 # corresponding `plugin_class` for all the plugins.
68 for plugin_type, raw_plugins in plugins.items():
69 if plugin_type == PluginType.MAPPERS:
70 for mapper in raw_plugins:
71 plugin_type_plugins[PluginType.MAPPERS].append(
72 ProjectPlugin(PluginType.MAPPERS, **mapper)
73 )
74 plugin_type_plugins[PluginType.MAPPERS].extend(
75 self.get_plugins_for_mappings(mapper)
76 )
77 else:
78 for raw_plugin in raw_plugins:
79 plugin = ProjectPlugin(PluginType(plugin_type), **raw_plugin)
80 plugin_type_plugins[plugin.type].append(plugin)
81
82 return plugin_type_plugins
83
84 def load_schedules(self, schedules: list[dict]) -> list[Schedule]:
85 """Parse the meltano.yml file and return it as Schedule instances.
86
87 Args:
88 schedules: List of schedule configurations.
89
90 Returns:
91 List of new Schedule instances.
92 """
93 return list(map(Schedule.parse, schedules))
94
95 @staticmethod
96 def load_environments(environments: Iterable[dict]) -> list[Environment]:
97 """Parse `Environment` objects from python objects.
98
99 Args:
100 environments: Sequence of environment dictionaries.
101
102 Returns:
103 A list of `Environment` objects.
104 """
105 return [Environment.parse(obj) for obj in environments]
106
107 @staticmethod
108 def load_job_tasks(jobs: Iterable[dict]) -> list[TaskSets]:
109 """Parse `TaskSets` objects from python objects.
110
111 Args:
112 jobs: Sequence of job dictionaries.
113
114 Returns:
115 A list of `Job` objects.
116 """
117 return [TaskSets.parse(obj) for obj in jobs]
118
119 @staticmethod
120 def get_plugins_for_mappings(mapper_config: dict) -> list[ProjectPlugin]:
121 """Mapper plugins are a special case. They are not a single plugin, but actually a list of plugins generated from the mapping config defined within the mapper config.
122
123 Args:
124 mapper_config: The dict representation of a mapper config found in in meltano.yml.
125
126 Returns:
127 A list of `ProjectPlugin` instances.
128 """
129 mapping_plugins: list[ProjectPlugin] = []
130 for mapping in mapper_config.get("mappings", []):
131 raw_mapping_plugin = copy.deepcopy(mapper_config)
132 raw_mapping_plugin["mapping"] = True
133 raw_mapping_plugin["mapping_name"] = mapping.get("name")
134 raw_mapping_plugin["config"] = mapping.get("config")
135 mapping_plugins.append(
136 ProjectPlugin(PluginType.MAPPERS, **raw_mapping_plugin)
137 )
138 return mapping_plugins
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/meltano/core/meltano_file.py b/src/meltano/core/meltano_file.py
--- a/src/meltano/core/meltano_file.py
+++ b/src/meltano/core/meltano_file.py
@@ -1,8 +1,9 @@
"""Module for working with meltano.yml files."""
+
from __future__ import annotations
import copy
-from typing import Iterable
+from typing import Any, Iterable
from meltano.core.behavior.canonical import Canonical
from meltano.core.environment import Environment
@@ -25,6 +26,7 @@
environments: list[dict] = None,
jobs: list[dict] = None,
env: dict[str, str] = None,
+ annotations: dict[str, dict[Any, Any]] | None = None, # noqa: WPS442
**extras,
):
"""Construct a new MeltanoFile object from meltano.yml file.
@@ -36,6 +38,7 @@
environments: Environment configuration for this project.
jobs: Job configuration for this project.
env: Environment variables for this project.
+ annotations: Annotations for external tools/vendors - do not access.
extras: Additional configuration for this project.
"""
super().__init__(
@@ -47,6 +50,7 @@
environments=self.load_environments(environments or []),
jobs=self.load_job_tasks(jobs or []),
env=env or {},
+ annotations=annotations,
)
def load_plugins(self, plugins: dict[str, dict]) -> Canonical:
| {"golden_diff": "diff --git a/src/meltano/core/meltano_file.py b/src/meltano/core/meltano_file.py\n--- a/src/meltano/core/meltano_file.py\n+++ b/src/meltano/core/meltano_file.py\n@@ -1,8 +1,9 @@\n \"\"\"Module for working with meltano.yml files.\"\"\"\n+\n from __future__ import annotations\n \n import copy\n-from typing import Iterable\n+from typing import Any, Iterable\n \n from meltano.core.behavior.canonical import Canonical\n from meltano.core.environment import Environment\n@@ -25,6 +26,7 @@\n environments: list[dict] = None,\n jobs: list[dict] = None,\n env: dict[str, str] = None,\n+ annotations: dict[str, dict[Any, Any]] | None = None, # noqa: WPS442\n **extras,\n ):\n \"\"\"Construct a new MeltanoFile object from meltano.yml file.\n@@ -36,6 +38,7 @@\n environments: Environment configuration for this project.\n jobs: Job configuration for this project.\n env: Environment variables for this project.\n+ annotations: Annotations for external tools/vendors - do not access.\n extras: Additional configuration for this project.\n \"\"\"\n super().__init__(\n@@ -47,6 +50,7 @@\n environments=self.load_environments(environments or []),\n jobs=self.load_job_tasks(jobs or []),\n env=env or {},\n+ annotations=annotations,\n )\n \n def load_plugins(self, plugins: dict[str, dict]) -> Canonical:\n", "issue": "Allow vendor-specific annotations in `meltano.yml`\nDiscussed in https://github.com/meltano/meltano/discussions/7053\r\n\r\nWe should update `meltano.schema.json` to permit an object with arbitrary fields (i.e. `\"additionalProperties\": true`) within any object in the schema that does not already impose restrictions on the permitted additional properties. That is to say:\r\n- If the object has `\"additionalProperties\": true`, then it can have `annotations`, and no changes are required to the schema.\r\n- If the object has `\"additionalProperties\": false`, then it can have `annotations` and we must explicitly add it to the properties.\r\n- If the object has `additionalProperties` set to anything else (e.g. limiting it to strings as `env` does), then annotations are not supported, and no change to the schema is necessary.\r\n\r\nSo the only places in the schema that need to be updated are where it says `\"additionalProperties\": false`.\r\n\r\nDocumentation around what this field is for should be added. 
It should be made clear that the core Meltano library/CLI does not use this field, but it may be used for Meltano Cloud, or third-party tools.\r\n\r\nThe documentation should also state that we impose no limitation on how it can be used except for requiring that the top-level of each annotation object may only contain objects as properties, and that we recommend that third-party tools keep their annotations under a sensible key, such as the name of the tool.\r\n\r\nTesting should be done by adding annotations liberally to integration tests, and ensuring that they are ignored.\n", "before_files": [{"content": "\"\"\"Module for working with meltano.yml files.\"\"\"\nfrom __future__ import annotations\n\nimport copy\nfrom typing import Iterable\n\nfrom meltano.core.behavior.canonical import Canonical\nfrom meltano.core.environment import Environment\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.schedule import Schedule\nfrom meltano.core.task_sets import TaskSets\n\nVERSION = 1\n\n\nclass MeltanoFile(Canonical):\n \"\"\"Data and loading methods for meltano.yml files.\"\"\"\n\n def __init__(\n self,\n version: int = VERSION,\n plugins: dict[str, dict] = None,\n schedules: list[dict] = None,\n environments: list[dict] = None,\n jobs: list[dict] = None,\n env: dict[str, str] = None,\n **extras,\n ):\n \"\"\"Construct a new MeltanoFile object from meltano.yml file.\n\n Args:\n version: The meltano.yml version, currently always 1.\n plugins: Plugin configuration for this project.\n schedules: Schedule configuration for this project.\n environments: Environment configuration for this project.\n jobs: Job configuration for this project.\n env: Environment variables for this project.\n extras: Additional configuration for this project.\n \"\"\"\n super().__init__(\n # Attributes will be listed in meltano.yml in this order:\n version=version,\n extras=extras,\n plugins=self.load_plugins(plugins or {}),\n schedules=self.load_schedules(schedules or []),\n environments=self.load_environments(environments or []),\n jobs=self.load_job_tasks(jobs or []),\n env=env or {},\n )\n\n def load_plugins(self, plugins: dict[str, dict]) -> Canonical:\n \"\"\"Parse the `meltano.yml` file and return it as `ProjectPlugin` instances.\n\n Args:\n plugins: Dictionary of plugin configurations.\n\n Returns:\n New ProjectPlugin instances.\n \"\"\"\n plugin_type_plugins = Canonical()\n\n for ptype in PluginType:\n plugin_type_plugins[ptype] = []\n\n # this will parse the meltano.yml file and create an instance of the\n # corresponding `plugin_class` for all the plugins.\n for plugin_type, raw_plugins in plugins.items():\n if plugin_type == PluginType.MAPPERS:\n for mapper in raw_plugins:\n plugin_type_plugins[PluginType.MAPPERS].append(\n ProjectPlugin(PluginType.MAPPERS, **mapper)\n )\n plugin_type_plugins[PluginType.MAPPERS].extend(\n self.get_plugins_for_mappings(mapper)\n )\n else:\n for raw_plugin in raw_plugins:\n plugin = ProjectPlugin(PluginType(plugin_type), **raw_plugin)\n plugin_type_plugins[plugin.type].append(plugin)\n\n return plugin_type_plugins\n\n def load_schedules(self, schedules: list[dict]) -> list[Schedule]:\n \"\"\"Parse the meltano.yml file and return it as Schedule instances.\n\n Args:\n schedules: List of schedule configurations.\n\n Returns:\n List of new Schedule instances.\n \"\"\"\n return list(map(Schedule.parse, schedules))\n\n @staticmethod\n def load_environments(environments: Iterable[dict]) -> list[Environment]:\n 
\"\"\"Parse `Environment` objects from python objects.\n\n Args:\n environments: Sequence of environment dictionaries.\n\n Returns:\n A list of `Environment` objects.\n \"\"\"\n return [Environment.parse(obj) for obj in environments]\n\n @staticmethod\n def load_job_tasks(jobs: Iterable[dict]) -> list[TaskSets]:\n \"\"\"Parse `TaskSets` objects from python objects.\n\n Args:\n jobs: Sequence of job dictionaries.\n\n Returns:\n A list of `Job` objects.\n \"\"\"\n return [TaskSets.parse(obj) for obj in jobs]\n\n @staticmethod\n def get_plugins_for_mappings(mapper_config: dict) -> list[ProjectPlugin]:\n \"\"\"Mapper plugins are a special case. They are not a single plugin, but actually a list of plugins generated from the mapping config defined within the mapper config.\n\n Args:\n mapper_config: The dict representation of a mapper config found in in meltano.yml.\n\n Returns:\n A list of `ProjectPlugin` instances.\n \"\"\"\n mapping_plugins: list[ProjectPlugin] = []\n for mapping in mapper_config.get(\"mappings\", []):\n raw_mapping_plugin = copy.deepcopy(mapper_config)\n raw_mapping_plugin[\"mapping\"] = True\n raw_mapping_plugin[\"mapping_name\"] = mapping.get(\"name\")\n raw_mapping_plugin[\"config\"] = mapping.get(\"config\")\n mapping_plugins.append(\n ProjectPlugin(PluginType.MAPPERS, **raw_mapping_plugin)\n )\n return mapping_plugins\n", "path": "src/meltano/core/meltano_file.py"}], "after_files": [{"content": "\"\"\"Module for working with meltano.yml files.\"\"\"\n\nfrom __future__ import annotations\n\nimport copy\nfrom typing import Any, Iterable\n\nfrom meltano.core.behavior.canonical import Canonical\nfrom meltano.core.environment import Environment\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.schedule import Schedule\nfrom meltano.core.task_sets import TaskSets\n\nVERSION = 1\n\n\nclass MeltanoFile(Canonical):\n \"\"\"Data and loading methods for meltano.yml files.\"\"\"\n\n def __init__(\n self,\n version: int = VERSION,\n plugins: dict[str, dict] = None,\n schedules: list[dict] = None,\n environments: list[dict] = None,\n jobs: list[dict] = None,\n env: dict[str, str] = None,\n annotations: dict[str, dict[Any, Any]] | None = None, # noqa: WPS442\n **extras,\n ):\n \"\"\"Construct a new MeltanoFile object from meltano.yml file.\n\n Args:\n version: The meltano.yml version, currently always 1.\n plugins: Plugin configuration for this project.\n schedules: Schedule configuration for this project.\n environments: Environment configuration for this project.\n jobs: Job configuration for this project.\n env: Environment variables for this project.\n annotations: Annotations for external tools/vendors - do not access.\n extras: Additional configuration for this project.\n \"\"\"\n super().__init__(\n # Attributes will be listed in meltano.yml in this order:\n version=version,\n extras=extras,\n plugins=self.load_plugins(plugins or {}),\n schedules=self.load_schedules(schedules or []),\n environments=self.load_environments(environments or []),\n jobs=self.load_job_tasks(jobs or []),\n env=env or {},\n annotations=annotations,\n )\n\n def load_plugins(self, plugins: dict[str, dict]) -> Canonical:\n \"\"\"Parse the `meltano.yml` file and return it as `ProjectPlugin` instances.\n\n Args:\n plugins: Dictionary of plugin configurations.\n\n Returns:\n New ProjectPlugin instances.\n \"\"\"\n plugin_type_plugins = Canonical()\n\n for ptype in PluginType:\n plugin_type_plugins[ptype] = []\n\n # this will parse the 
meltano.yml file and create an instance of the\n # corresponding `plugin_class` for all the plugins.\n for plugin_type, raw_plugins in plugins.items():\n if plugin_type == PluginType.MAPPERS:\n for mapper in raw_plugins:\n plugin_type_plugins[PluginType.MAPPERS].append(\n ProjectPlugin(PluginType.MAPPERS, **mapper)\n )\n plugin_type_plugins[PluginType.MAPPERS].extend(\n self.get_plugins_for_mappings(mapper)\n )\n else:\n for raw_plugin in raw_plugins:\n plugin = ProjectPlugin(PluginType(plugin_type), **raw_plugin)\n plugin_type_plugins[plugin.type].append(plugin)\n\n return plugin_type_plugins\n\n def load_schedules(self, schedules: list[dict]) -> list[Schedule]:\n \"\"\"Parse the meltano.yml file and return it as Schedule instances.\n\n Args:\n schedules: List of schedule configurations.\n\n Returns:\n List of new Schedule instances.\n \"\"\"\n return list(map(Schedule.parse, schedules))\n\n @staticmethod\n def load_environments(environments: Iterable[dict]) -> list[Environment]:\n \"\"\"Parse `Environment` objects from python objects.\n\n Args:\n environments: Sequence of environment dictionaries.\n\n Returns:\n A list of `Environment` objects.\n \"\"\"\n return [Environment.parse(obj) for obj in environments]\n\n @staticmethod\n def load_job_tasks(jobs: Iterable[dict]) -> list[TaskSets]:\n \"\"\"Parse `TaskSets` objects from python objects.\n\n Args:\n jobs: Sequence of job dictionaries.\n\n Returns:\n A list of `Job` objects.\n \"\"\"\n return [TaskSets.parse(obj) for obj in jobs]\n\n @staticmethod\n def get_plugins_for_mappings(mapper_config: dict) -> list[ProjectPlugin]:\n \"\"\"Mapper plugins are a special case. They are not a single plugin, but actually a list of plugins generated from the mapping config defined within the mapper config.\n\n Args:\n mapper_config: The dict representation of a mapper config found in in meltano.yml.\n\n Returns:\n A list of `ProjectPlugin` instances.\n \"\"\"\n mapping_plugins: list[ProjectPlugin] = []\n for mapping in mapper_config.get(\"mappings\", []):\n raw_mapping_plugin = copy.deepcopy(mapper_config)\n raw_mapping_plugin[\"mapping\"] = True\n raw_mapping_plugin[\"mapping_name\"] = mapping.get(\"name\")\n raw_mapping_plugin[\"config\"] = mapping.get(\"config\")\n mapping_plugins.append(\n ProjectPlugin(PluginType.MAPPERS, **raw_mapping_plugin)\n )\n return mapping_plugins\n", "path": "src/meltano/core/meltano_file.py"}]} | 1,904 | 339 |
gh_patches_debug_41084 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Penalty weights are incorrect
**Describe the bug**
See title.
**To Reproduce**
Steps to reproduce the behavior:
1. Any segmentation/classification problem
2. Add a print statement at the end of `GANDLF.utils.tensor.get_class_imbalance_weights`
3. See that `penalty_dict` and `weights_dict` are the same
For example, for a small I-SPY dataset:
```
penalty_dict: {0: 0.9928165871714076, 1: 0.0071834128285923945}
weights_dict: {0: 0.9928165871714076, 1: 0.0071834128285923945}
```
**Expected behavior**
The two dictionaries should be complementary (each penalty equal to `1 -` the corresponding weight). In the above example, it should be this:
```
penalty_dict: {0: 0.0071834128285923945, 1: 0.9928165871714076}
weights_dict: {0: 0.9928165871714076, 1: 0.0071834128285923945}
```
**Screenshots**
N.A.
**GaNDLF Version**
0.0.12-dev
**Desktop (please complete the following information):**
N.A.
**Additional context**
Reported by @iskobleva
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `GANDLF/utils/tensor.py`
Content:
```
1 import os, sys
2 import numpy as np
3 import torch
4 import torch.nn as nn
5 import torchio
6
7
8 def one_hot(segmask_array, class_list):
9 """
10 This function creates a one-hot-encoded mask from the segmentation mask Tensor and specified class list
11
12 Args:
13 segmask_array (torch.Tensor): The segmentation mask Tensor.
14 class_list (list): The list of classes based on which one-hot encoding needs to happen.
15
16 Returns:
17 torch.Tensor: The one-hot encoded torch.Tensor
18 """
19 batch_size = segmask_array.shape[0]
20 batch_stack = []
21 for b in range(batch_size):
22 one_hot_stack = []
23 segmask_array_iter = segmask_array[b, 0]
24 bin_mask = segmask_array_iter == 0 # initialize bin_mask
25 for (
26 _class
27 ) in class_list: # this implementation allows users to combine logical operands
28 if isinstance(_class, str):
29 if "||" in _class: # special case
30 class_split = _class.split("||")
31 bin_mask = segmask_array_iter == int(class_split[0])
32 for i in range(1, len(class_split)):
33 bin_mask = bin_mask | (
34 segmask_array_iter == int(class_split[i])
35 )
36 elif "|" in _class: # special case
37 class_split = _class.split("|")
38 bin_mask = segmask_array_iter == int(class_split[0])
39 for i in range(1, len(class_split)):
40 bin_mask = bin_mask | (
41 segmask_array_iter == int(class_split[i])
42 )
43 else:
44 # assume that it is a simple int
45 bin_mask = segmask_array_iter == int(_class)
46 else:
47 bin_mask = segmask_array_iter == int(_class)
48 bin_mask = bin_mask.long()
49 one_hot_stack.append(bin_mask)
50 one_hot_stack = torch.stack(one_hot_stack)
51 batch_stack.append(one_hot_stack)
52 batch_stack = torch.stack(batch_stack)
53 return batch_stack
54
55
56 def reverse_one_hot(predmask_array, class_list):
57 """
58 This function creates a full segmentation mask Tensor from a one-hot-encoded mask and specified class list
59
60 Args:
61 predmask_array (torch.Tensor): The predicted segmentation mask Tensor.
62 class_list (list): The list of classes based on which one-hot encoding needs to happen.
63
64 Returns:
65 torch.Tensor: The final mask torch.Tensor.
66 """
67 if isinstance(predmask_array, torch.Tensor):
68 array_to_consider = predmask_array.cpu().numpy()
69 else:
70 array_to_consider = predmask_array
71 idx_argmax = np.argmax(array_to_consider, axis=0)
72 final_mask = 0
73 special_cases_to_check = ["||"]
74 special_case_detected = False
75 max_current = 0
76
77 for _class in class_list:
78 for case in special_cases_to_check:
79 if isinstance(_class, str):
80 if case in _class: # check if any of the special cases are present
81 special_case_detected = True
82 class_split = _class.split(
83 case
84 ) # if present, then split the sub-class
85 for i in class_split: # find the max for computation later on
86 if int(i) > max_current:
87 max_current = int(i)
88
89 if special_case_detected:
90 start_idx = 0
91 if (class_list[0] == 0) or (class_list[0] == "0"):
92 start_idx = 1
93
94 final_mask = np.asarray(
95 predmask_array[start_idx, :, :, :], dtype=int
96 ) # predmask_array[0,:,:,:].long()
97 start_idx += 1
98 for i in range(start_idx, len(class_list)):
99 final_mask += np.asarray(
100 predmask_array[0, :, :, :], dtype=int
101 ) # predmask_array[i,:,:,:].long()
102 # temp_sum = torch.sum(output)
103 # output_2 = (max_current - torch.sum(output)) % max_current
104 # test_2 = 1
105 else:
106 for idx, _class in enumerate(class_list):
107 final_mask = final_mask + (idx_argmax == idx) * _class
108 return final_mask
109
110
111 def send_model_to_device(model, amp, device, optimizer):
112 """
113 This function reads the environment variable(s) and send model to correct device
114
115 Args:
116 model (torch.nn.Module): The model that needs to be sent to specified device.
117 amp (bool): Whether automatic mixed precision is to be used.
118 device (str): Device type.
119 optimizer (torch.optim): The optimizer for training.
120
121 Returns:
122 torch.nn.Module: The model after it has been sent to specified device
123 bool: Whether automatic mixed precision is to be used or not.
124 torch.device: Device type.
125 """
126 if device != "cpu":
127 if os.environ.get("CUDA_VISIBLE_DEVICES") is None:
128 sys.exit(
129 "Please set the environment variable 'CUDA_VISIBLE_DEVICES' correctly before trying to run GANDLF on GPU"
130 )
131
132 dev = os.environ.get("CUDA_VISIBLE_DEVICES")
133 # multi-gpu support
134 # ###
135 # # https://discuss.pytorch.org/t/cuda-visible-devices-make-gpu-disappear/21439/17?u=sarthakpati
136 # ###
137 if "," in dev:
138 device = torch.device("cuda")
139 model = nn.DataParallel(model, "[" + dev + "]")
140 else:
141 print("Device requested via CUDA_VISIBLE_DEVICES: ", dev)
142 print("Total number of CUDA devices: ", torch.cuda.device_count())
143
144 # if only a single visible device, it will be indexed as '0'
145 if torch.cuda.device_count() == 1:
146 dev = "0"
147
148 dev_int = int(dev)
149 print("Device finally used: ", dev)
150 # device = torch.device('cuda:' + dev)
151 device = torch.device("cuda")
152 print("Sending model to aforementioned device")
153 model = model.to(device)
154 print(
155 "Memory Total : ",
156 round(
157 torch.cuda.get_device_properties(dev_int).total_memory / 1024 ** 3,
158 1,
159 ),
160 "GB, Allocated: ",
161 round(torch.cuda.memory_allocated(dev_int) / 1024 ** 3, 1),
162 "GB, Cached: ",
163 round(torch.cuda.memory_reserved(dev_int) / 1024 ** 3, 1),
164 "GB",
165 )
166
167 print(
168 "Device - Current: %s Count: %d Name: %s Availability: %s"
169 % (
170 torch.cuda.current_device(),
171 torch.cuda.device_count(),
172 torch.cuda.get_device_name(device),
173 torch.cuda.is_available(),
174 )
175 )
176
177 if not (optimizer is None):
178 # ensuring optimizer is in correct device - https://github.com/pytorch/pytorch/issues/8741
179 optimizer.load_state_dict(optimizer.state_dict())
180
181 else:
182 dev = -1
183 device = torch.device("cpu")
184 model.cpu()
185 amp = False
186 print("Since Device is CPU, Mixed Precision Training is set to False")
187
188 return model, amp, device
189
190
191 def get_class_imbalance_weights(training_data_loader, parameters):
192 """
193 This function calculates the penalty that is used for validation loss in multi-class problems
194
195 Args:
196 training_data_loader (torch.utils.data.DataLoader): The training data loader.
197 parameters (dict): The parameters passed by the user yaml.
198
199 Returns:
200 dict: The penalty weights for different classes under consideration.
201 """
202 abs_dict = {} # absolute counts for each class
203 weights_dict = {} # average for "weighted averaging"
204 penalty_dict = None # penalty for misclassification
205 # basically, do this for segmentation/classification tasks
206
207 if parameters["problem_type"] != "regression":
208 penalty_dict = {}
209 for i in range(0, len(parameters["model"]["class_list"])):
210 abs_dict[i] = 0
211 penalty_dict[i] = 0
212
213 penalty_loader = training_data_loader
214
215 # get the weights for use for dice loss
216 total_counter = 0
217
218 # For regression dice penalty need not be taken account
219 # For classification this should be calculated on the basis of predicted labels and mask
220 # iterate through full penalty data
221 for _, (subject) in enumerate(penalty_loader):
222
223 # segmentation needs masks to be one-hot encoded
224 if parameters["problem_type"] == "segmentation":
225 # accumulate dice weights for each label
226 mask = subject["label"][torchio.DATA]
227 one_hot_mask = one_hot(mask, parameters["model"]["class_list"])
228 for i in range(0, len(parameters["model"]["class_list"])):
229 currentNumber = torch.nonzero(
230 one_hot_mask[:, i, :, :, :], as_tuple=False
231 ).size(0)
232 # class-specific non-zero voxels
233 abs_dict[i] += currentNumber
234 # total number of non-zero voxels to be considered
235 total_counter += currentNumber
236
237 # for classification, the value needs to be used directly
238 elif parameters["problem_type"] == "classification":
239 # accumulate weights for each label
240 value_to_predict = subject["value_0"][0]
241 for i in range(0, len(parameters["model"]["class_list"])):
242 if value_to_predict == i:
243 abs_dict[i] += 1
244 # we only want to increase the counter for those subjects that are defined in the class_list
245 total_counter += 1
246
247 # Normalize class weights
248 weights_dict = {key: val / total_counter for key, val in abs_dict.items()}
249
250 # get the penalty values - abs_dict contains the overall number for each class in the penalty data
251 for i in range(0, len(parameters["model"]["class_list"])):
252 penalty = total_counter # start with the assumption that all the non-zero voxels (segmentation) or activate labels (classification) make up the penalty
253 for j in range(0, len(parameters["model"]["class_list"])):
254 if i != j: # for differing classes, subtract the current weight
255 penalty -= abs_dict[j]
256
257 # finally, the "penalty" variable contains the total number of voxels/activations that are not part of the current class
258 # this is to be used to weight the loss function
259 # adding epsilon to avoid division by zero
260 penalty_dict[i] = (penalty + sys.float_info.epsilon) / (
261 total_counter + sys.float_info.epsilon
262 )
263
264 return penalty_dict, weights_dict
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/GANDLF/utils/tensor.py b/GANDLF/utils/tensor.py
--- a/GANDLF/utils/tensor.py
+++ b/GANDLF/utils/tensor.py
@@ -22,9 +22,8 @@
one_hot_stack = []
segmask_array_iter = segmask_array[b, 0]
bin_mask = segmask_array_iter == 0 # initialize bin_mask
- for (
- _class
- ) in class_list: # this implementation allows users to combine logical operands
+ # this implementation allows users to combine logical operands
+ for _class in class_list:
if isinstance(_class, str):
if "||" in _class: # special case
class_split = _class.split("||")
@@ -79,9 +78,8 @@
if isinstance(_class, str):
if case in _class: # check if any of the special cases are present
special_case_detected = True
- class_split = _class.split(
- case
- ) # if present, then split the sub-class
+ # if present, then split the sub-class
+ class_split = _class.split(case)
for i in class_split: # find the max for computation later on
if int(i) > max_current:
max_current = int(i)
@@ -91,9 +89,7 @@
if (class_list[0] == 0) or (class_list[0] == "0"):
start_idx = 1
- final_mask = np.asarray(
- predmask_array[start_idx, :, :, :], dtype=int
- ) # predmask_array[0,:,:,:].long()
+ final_mask = np.asarray(predmask_array[start_idx, :, :, :], dtype=int)
start_idx += 1
for i in range(start_idx, len(class_list)):
final_mask += np.asarray(
@@ -245,20 +241,21 @@
total_counter += 1
# Normalize class weights
- weights_dict = {key: val / total_counter for key, val in abs_dict.items()}
-
- # get the penalty values - abs_dict contains the overall number for each class in the penalty data
- for i in range(0, len(parameters["model"]["class_list"])):
- penalty = total_counter # start with the assumption that all the non-zero voxels (segmentation) or activate labels (classification) make up the penalty
- for j in range(0, len(parameters["model"]["class_list"])):
- if i != j: # for differing classes, subtract the current weight
- penalty -= abs_dict[j]
-
- # finally, the "penalty" variable contains the total number of voxels/activations that are not part of the current class
- # this is to be used to weight the loss function
- # adding epsilon to avoid division by zero
- penalty_dict[i] = (penalty + sys.float_info.epsilon) / (
- total_counter + sys.float_info.epsilon
- )
+ weights_dict = {
+ key: (val + sys.float_info.epsilon) / total_counter
+ for key, val in abs_dict.items()
+ }
+
+ # get the raw penalty values
+ penalty = {
+ key: total_counter / (len(abs_dict) * (val + sys.float_info.epsilon))
+ for key, val in abs_dict.items()
+ }
+ # normalize penalty to sum of 1
+ penalty_sum = np.fromiter(penalty.values(), dtype=np.float64).sum()
+ penalty_dict = {
+ key: (val + sys.float_info.epsilon) / penalty_sum
+ for key, val in penalty.items()
+ }
return penalty_dict, weights_dict
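As a quick sanity check, a self-contained sketch of the patched weighting logic with hypothetical class counts; it mirrors the dictionary comprehensions above rather than calling into GaNDLF:

```python
# Standalone sketch of the patched normalization (hypothetical voxel counts);
# not a call into GaNDLF itself.
import sys
import numpy as np

abs_dict = {0: 9928, 1: 72}          # per-class counts, illustrative only
total_counter = sum(abs_dict.values())

# weights: relative class frequency
weights_dict = {
    k: (v + sys.float_info.epsilon) / total_counter for k, v in abs_dict.items()
}
# penalties: inverse class frequency, normalized to sum to 1
penalty = {
    k: total_counter / (len(abs_dict) * (v + sys.float_info.epsilon))
    for k, v in abs_dict.items()
}
penalty_sum = np.fromiter(penalty.values(), dtype=np.float64).sum()
penalty_dict = {k: (v + sys.float_info.epsilon) / penalty_sum for k, v in penalty.items()}

print(weights_dict)  # ~{0: 0.9928, 1: 0.0072} -- class 0 dominates
print(penalty_dict)  # ~{0: 0.0072, 1: 0.9928} -- penalties are flipped, as the issue expects
```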
| {"golden_diff": "diff --git a/GANDLF/utils/tensor.py b/GANDLF/utils/tensor.py\n--- a/GANDLF/utils/tensor.py\n+++ b/GANDLF/utils/tensor.py\n@@ -22,9 +22,8 @@\n one_hot_stack = []\n segmask_array_iter = segmask_array[b, 0]\n bin_mask = segmask_array_iter == 0 # initialize bin_mask\n- for (\n- _class\n- ) in class_list: # this implementation allows users to combine logical operands\n+ # this implementation allows users to combine logical operands\n+ for _class in class_list:\n if isinstance(_class, str):\n if \"||\" in _class: # special case\n class_split = _class.split(\"||\")\n@@ -79,9 +78,8 @@\n if isinstance(_class, str):\n if case in _class: # check if any of the special cases are present\n special_case_detected = True\n- class_split = _class.split(\n- case\n- ) # if present, then split the sub-class\n+ # if present, then split the sub-class\n+ class_split = _class.split(case)\n for i in class_split: # find the max for computation later on\n if int(i) > max_current:\n max_current = int(i)\n@@ -91,9 +89,7 @@\n if (class_list[0] == 0) or (class_list[0] == \"0\"):\n start_idx = 1\n \n- final_mask = np.asarray(\n- predmask_array[start_idx, :, :, :], dtype=int\n- ) # predmask_array[0,:,:,:].long()\n+ final_mask = np.asarray(predmask_array[start_idx, :, :, :], dtype=int)\n start_idx += 1\n for i in range(start_idx, len(class_list)):\n final_mask += np.asarray(\n@@ -245,20 +241,21 @@\n total_counter += 1\n \n # Normalize class weights\n- weights_dict = {key: val / total_counter for key, val in abs_dict.items()}\n-\n- # get the penalty values - abs_dict contains the overall number for each class in the penalty data\n- for i in range(0, len(parameters[\"model\"][\"class_list\"])):\n- penalty = total_counter # start with the assumption that all the non-zero voxels (segmentation) or activate labels (classification) make up the penalty\n- for j in range(0, len(parameters[\"model\"][\"class_list\"])):\n- if i != j: # for differing classes, subtract the current weight\n- penalty -= abs_dict[j]\n-\n- # finally, the \"penalty\" variable contains the total number of voxels/activations that are not part of the current class\n- # this is to be used to weight the loss function\n- # adding epsilon to avoid division by zero\n- penalty_dict[i] = (penalty + sys.float_info.epsilon) / (\n- total_counter + sys.float_info.epsilon\n- )\n+ weights_dict = {\n+ key: (val + sys.float_info.epsilon) / total_counter\n+ for key, val in abs_dict.items()\n+ }\n+\n+ # get the raw penalty values\n+ penalty = {\n+ key: total_counter / (len(abs_dict) * (val + sys.float_info.epsilon))\n+ for key, val in abs_dict.items()\n+ }\n+ # normalize penalty to sum of 1\n+ penalty_sum = np.fromiter(penalty.values(), dtype=np.float64).sum()\n+ penalty_dict = {\n+ key: (val + sys.float_info.epsilon) / penalty_sum\n+ for key, val in penalty.items()\n+ }\n \n return penalty_dict, weights_dict\n", "issue": "Penalty weights are incorrect\n**Describe the bug**\r\nSee title.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Any segmentation/classification problem\r\n2. Take a print at the end of `GANDLF.utils.tensor.get_class_imbalance_weights`\r\n3. See that `penalty_dict` and `weights_dict` are the same\r\n\r\nFor example, for a small I-SPY dataset:\r\n```\r\npenalty_dict: {0: 0.9928165871714076, 1: 0.0071834128285923945} \r\nweights_dict: {0: 0.9928165871714076, 1: 0.0071834128285923945}\r\n```\r\n\r\n**Expected behavior**\r\nThey should be `1-each_other`. 
In the above example, it should be this:\r\n\r\n```\r\npenalty_dict: {0: 0.0071834128285923945, 1: 0.9928165871714076} \r\nweights_dict: {0: 0.9928165871714076, 1: 0.0071834128285923945}\r\n```\r\n\r\n**Screenshots**\r\nN.A.\r\n\r\n**GaNDLF Version**\r\n0.0.12-dev\r\n\r\n**Desktop (please complete the following information):**\r\nN.A.\r\n\r\n**Additional context**\r\nReported by @iskobleva \r\n\n", "before_files": [{"content": "import os, sys\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchio\n\n\ndef one_hot(segmask_array, class_list):\n \"\"\"\n This function creates a one-hot-encoded mask from the segmentation mask Tensor and specified class list\n\n Args:\n segmask_array (torch.Tensor): The segmentation mask Tensor.\n class_list (list): The list of classes based on which one-hot encoding needs to happen.\n\n Returns:\n torch.Tensor: The one-hot encoded torch.Tensor\n \"\"\"\n batch_size = segmask_array.shape[0]\n batch_stack = []\n for b in range(batch_size):\n one_hot_stack = []\n segmask_array_iter = segmask_array[b, 0]\n bin_mask = segmask_array_iter == 0 # initialize bin_mask\n for (\n _class\n ) in class_list: # this implementation allows users to combine logical operands\n if isinstance(_class, str):\n if \"||\" in _class: # special case\n class_split = _class.split(\"||\")\n bin_mask = segmask_array_iter == int(class_split[0])\n for i in range(1, len(class_split)):\n bin_mask = bin_mask | (\n segmask_array_iter == int(class_split[i])\n )\n elif \"|\" in _class: # special case\n class_split = _class.split(\"|\")\n bin_mask = segmask_array_iter == int(class_split[0])\n for i in range(1, len(class_split)):\n bin_mask = bin_mask | (\n segmask_array_iter == int(class_split[i])\n )\n else:\n # assume that it is a simple int\n bin_mask = segmask_array_iter == int(_class)\n else:\n bin_mask = segmask_array_iter == int(_class)\n bin_mask = bin_mask.long()\n one_hot_stack.append(bin_mask)\n one_hot_stack = torch.stack(one_hot_stack)\n batch_stack.append(one_hot_stack)\n batch_stack = torch.stack(batch_stack)\n return batch_stack\n\n\ndef reverse_one_hot(predmask_array, class_list):\n \"\"\"\n This function creates a full segmentation mask Tensor from a one-hot-encoded mask and specified class list\n\n Args:\n predmask_array (torch.Tensor): The predicted segmentation mask Tensor.\n class_list (list): The list of classes based on which one-hot encoding needs to happen.\n\n Returns:\n torch.Tensor: The final mask torch.Tensor.\n \"\"\"\n if isinstance(predmask_array, torch.Tensor):\n array_to_consider = predmask_array.cpu().numpy()\n else:\n array_to_consider = predmask_array\n idx_argmax = np.argmax(array_to_consider, axis=0)\n final_mask = 0\n special_cases_to_check = [\"||\"]\n special_case_detected = False\n max_current = 0\n\n for _class in class_list:\n for case in special_cases_to_check:\n if isinstance(_class, str):\n if case in _class: # check if any of the special cases are present\n special_case_detected = True\n class_split = _class.split(\n case\n ) # if present, then split the sub-class\n for i in class_split: # find the max for computation later on\n if int(i) > max_current:\n max_current = int(i)\n\n if special_case_detected:\n start_idx = 0\n if (class_list[0] == 0) or (class_list[0] == \"0\"):\n start_idx = 1\n\n final_mask = np.asarray(\n predmask_array[start_idx, :, :, :], dtype=int\n ) # predmask_array[0,:,:,:].long()\n start_idx += 1\n for i in range(start_idx, len(class_list)):\n final_mask += np.asarray(\n predmask_array[0, :, :, :], dtype=int\n 
) # predmask_array[i,:,:,:].long()\n # temp_sum = torch.sum(output)\n # output_2 = (max_current - torch.sum(output)) % max_current\n # test_2 = 1\n else:\n for idx, _class in enumerate(class_list):\n final_mask = final_mask + (idx_argmax == idx) * _class\n return final_mask\n\n\ndef send_model_to_device(model, amp, device, optimizer):\n \"\"\"\n This function reads the environment variable(s) and send model to correct device\n\n Args:\n model (torch.nn.Module): The model that needs to be sent to specified device.\n amp (bool): Whether automatic mixed precision is to be used.\n device (str): Device type.\n optimizer (torch.optim): The optimizer for training.\n\n Returns:\n torch.nn.Module: The model after it has been sent to specified device\n bool: Whether automatic mixed precision is to be used or not.\n torch.device: Device type.\n \"\"\"\n if device != \"cpu\":\n if os.environ.get(\"CUDA_VISIBLE_DEVICES\") is None:\n sys.exit(\n \"Please set the environment variable 'CUDA_VISIBLE_DEVICES' correctly before trying to run GANDLF on GPU\"\n )\n\n dev = os.environ.get(\"CUDA_VISIBLE_DEVICES\")\n # multi-gpu support\n # ###\n # # https://discuss.pytorch.org/t/cuda-visible-devices-make-gpu-disappear/21439/17?u=sarthakpati\n # ###\n if \",\" in dev:\n device = torch.device(\"cuda\")\n model = nn.DataParallel(model, \"[\" + dev + \"]\")\n else:\n print(\"Device requested via CUDA_VISIBLE_DEVICES: \", dev)\n print(\"Total number of CUDA devices: \", torch.cuda.device_count())\n\n # if only a single visible device, it will be indexed as '0'\n if torch.cuda.device_count() == 1:\n dev = \"0\"\n\n dev_int = int(dev)\n print(\"Device finally used: \", dev)\n # device = torch.device('cuda:' + dev)\n device = torch.device(\"cuda\")\n print(\"Sending model to aforementioned device\")\n model = model.to(device)\n print(\n \"Memory Total : \",\n round(\n torch.cuda.get_device_properties(dev_int).total_memory / 1024 ** 3,\n 1,\n ),\n \"GB, Allocated: \",\n round(torch.cuda.memory_allocated(dev_int) / 1024 ** 3, 1),\n \"GB, Cached: \",\n round(torch.cuda.memory_reserved(dev_int) / 1024 ** 3, 1),\n \"GB\",\n )\n\n print(\n \"Device - Current: %s Count: %d Name: %s Availability: %s\"\n % (\n torch.cuda.current_device(),\n torch.cuda.device_count(),\n torch.cuda.get_device_name(device),\n torch.cuda.is_available(),\n )\n )\n\n if not (optimizer is None):\n # ensuring optimizer is in correct device - https://github.com/pytorch/pytorch/issues/8741\n optimizer.load_state_dict(optimizer.state_dict())\n\n else:\n dev = -1\n device = torch.device(\"cpu\")\n model.cpu()\n amp = False\n print(\"Since Device is CPU, Mixed Precision Training is set to False\")\n\n return model, amp, device\n\n\ndef get_class_imbalance_weights(training_data_loader, parameters):\n \"\"\"\n This function calculates the penalty that is used for validation loss in multi-class problems\n\n Args:\n training_data_loader (torch.utils.data.DataLoader): The training data loader.\n parameters (dict): The parameters passed by the user yaml.\n\n Returns:\n dict: The penalty weights for different classes under consideration.\n \"\"\"\n abs_dict = {} # absolute counts for each class\n weights_dict = {} # average for \"weighted averaging\"\n penalty_dict = None # penalty for misclassification\n # basically, do this for segmentation/classification tasks\n\n if parameters[\"problem_type\"] != \"regression\":\n penalty_dict = {}\n for i in range(0, len(parameters[\"model\"][\"class_list\"])):\n abs_dict[i] = 0\n penalty_dict[i] = 0\n\n penalty_loader = 
training_data_loader\n\n # get the weights for use for dice loss\n total_counter = 0\n\n # For regression dice penalty need not be taken account\n # For classification this should be calculated on the basis of predicted labels and mask\n # iterate through full penalty data\n for _, (subject) in enumerate(penalty_loader):\n\n # segmentation needs masks to be one-hot encoded\n if parameters[\"problem_type\"] == \"segmentation\":\n # accumulate dice weights for each label\n mask = subject[\"label\"][torchio.DATA]\n one_hot_mask = one_hot(mask, parameters[\"model\"][\"class_list\"])\n for i in range(0, len(parameters[\"model\"][\"class_list\"])):\n currentNumber = torch.nonzero(\n one_hot_mask[:, i, :, :, :], as_tuple=False\n ).size(0)\n # class-specific non-zero voxels\n abs_dict[i] += currentNumber\n # total number of non-zero voxels to be considered\n total_counter += currentNumber\n\n # for classification, the value needs to be used directly\n elif parameters[\"problem_type\"] == \"classification\":\n # accumulate weights for each label\n value_to_predict = subject[\"value_0\"][0]\n for i in range(0, len(parameters[\"model\"][\"class_list\"])):\n if value_to_predict == i:\n abs_dict[i] += 1\n # we only want to increase the counter for those subjects that are defined in the class_list\n total_counter += 1\n\n # Normalize class weights\n weights_dict = {key: val / total_counter for key, val in abs_dict.items()}\n\n # get the penalty values - abs_dict contains the overall number for each class in the penalty data\n for i in range(0, len(parameters[\"model\"][\"class_list\"])):\n penalty = total_counter # start with the assumption that all the non-zero voxels (segmentation) or activate labels (classification) make up the penalty\n for j in range(0, len(parameters[\"model\"][\"class_list\"])):\n if i != j: # for differing classes, subtract the current weight\n penalty -= abs_dict[j]\n\n # finally, the \"penalty\" variable contains the total number of voxels/activations that are not part of the current class\n # this is to be used to weight the loss function\n # adding epsilon to avoid division by zero\n penalty_dict[i] = (penalty + sys.float_info.epsilon) / (\n total_counter + sys.float_info.epsilon\n )\n\n return penalty_dict, weights_dict\n", "path": "GANDLF/utils/tensor.py"}], "after_files": [{"content": "import os, sys\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchio\n\n\ndef one_hot(segmask_array, class_list):\n \"\"\"\n This function creates a one-hot-encoded mask from the segmentation mask Tensor and specified class list\n\n Args:\n segmask_array (torch.Tensor): The segmentation mask Tensor.\n class_list (list): The list of classes based on which one-hot encoding needs to happen.\n\n Returns:\n torch.Tensor: The one-hot encoded torch.Tensor\n \"\"\"\n batch_size = segmask_array.shape[0]\n batch_stack = []\n for b in range(batch_size):\n one_hot_stack = []\n segmask_array_iter = segmask_array[b, 0]\n bin_mask = segmask_array_iter == 0 # initialize bin_mask\n # this implementation allows users to combine logical operands\n for _class in class_list:\n if isinstance(_class, str):\n if \"||\" in _class: # special case\n class_split = _class.split(\"||\")\n bin_mask = segmask_array_iter == int(class_split[0])\n for i in range(1, len(class_split)):\n bin_mask = bin_mask | (\n segmask_array_iter == int(class_split[i])\n )\n elif \"|\" in _class: # special case\n class_split = _class.split(\"|\")\n bin_mask = segmask_array_iter == int(class_split[0])\n for i in range(1, 
len(class_split)):\n bin_mask = bin_mask | (\n segmask_array_iter == int(class_split[i])\n )\n else:\n # assume that it is a simple int\n bin_mask = segmask_array_iter == int(_class)\n else:\n bin_mask = segmask_array_iter == int(_class)\n bin_mask = bin_mask.long()\n one_hot_stack.append(bin_mask)\n one_hot_stack = torch.stack(one_hot_stack)\n batch_stack.append(one_hot_stack)\n batch_stack = torch.stack(batch_stack)\n return batch_stack\n\n\ndef reverse_one_hot(predmask_array, class_list):\n \"\"\"\n This function creates a full segmentation mask Tensor from a one-hot-encoded mask and specified class list\n\n Args:\n predmask_array (torch.Tensor): The predicted segmentation mask Tensor.\n class_list (list): The list of classes based on which one-hot encoding needs to happen.\n\n Returns:\n torch.Tensor: The final mask torch.Tensor.\n \"\"\"\n if isinstance(predmask_array, torch.Tensor):\n array_to_consider = predmask_array.cpu().numpy()\n else:\n array_to_consider = predmask_array\n idx_argmax = np.argmax(array_to_consider, axis=0)\n final_mask = 0\n special_cases_to_check = [\"||\"]\n special_case_detected = False\n max_current = 0\n\n for _class in class_list:\n for case in special_cases_to_check:\n if isinstance(_class, str):\n if case in _class: # check if any of the special cases are present\n special_case_detected = True\n # if present, then split the sub-class\n class_split = _class.split(case)\n for i in class_split: # find the max for computation later on\n if int(i) > max_current:\n max_current = int(i)\n\n if special_case_detected:\n start_idx = 0\n if (class_list[0] == 0) or (class_list[0] == \"0\"):\n start_idx = 1\n\n final_mask = np.asarray(predmask_array[start_idx, :, :, :], dtype=int)\n start_idx += 1\n for i in range(start_idx, len(class_list)):\n final_mask += np.asarray(\n predmask_array[0, :, :, :], dtype=int\n ) # predmask_array[i,:,:,:].long()\n # temp_sum = torch.sum(output)\n # output_2 = (max_current - torch.sum(output)) % max_current\n # test_2 = 1\n else:\n for idx, _class in enumerate(class_list):\n final_mask = final_mask + (idx_argmax == idx) * _class\n return final_mask\n\n\ndef send_model_to_device(model, amp, device, optimizer):\n \"\"\"\n This function reads the environment variable(s) and send model to correct device\n\n Args:\n model (torch.nn.Module): The model that needs to be sent to specified device.\n amp (bool): Whether automatic mixed precision is to be used.\n device (str): Device type.\n optimizer (torch.optim): The optimizer for training.\n\n Returns:\n torch.nn.Module: The model after it has been sent to specified device\n bool: Whether automatic mixed precision is to be used or not.\n torch.device: Device type.\n \"\"\"\n if device != \"cpu\":\n if os.environ.get(\"CUDA_VISIBLE_DEVICES\") is None:\n sys.exit(\n \"Please set the environment variable 'CUDA_VISIBLE_DEVICES' correctly before trying to run GANDLF on GPU\"\n )\n\n dev = os.environ.get(\"CUDA_VISIBLE_DEVICES\")\n # multi-gpu support\n # ###\n # # https://discuss.pytorch.org/t/cuda-visible-devices-make-gpu-disappear/21439/17?u=sarthakpati\n # ###\n if \",\" in dev:\n device = torch.device(\"cuda\")\n model = nn.DataParallel(model, \"[\" + dev + \"]\")\n else:\n print(\"Device requested via CUDA_VISIBLE_DEVICES: \", dev)\n print(\"Total number of CUDA devices: \", torch.cuda.device_count())\n\n # if only a single visible device, it will be indexed as '0'\n if torch.cuda.device_count() == 1:\n dev = \"0\"\n\n dev_int = int(dev)\n print(\"Device finally used: \", dev)\n # device = 
torch.device('cuda:' + dev)\n device = torch.device(\"cuda\")\n print(\"Sending model to aforementioned device\")\n model = model.to(device)\n print(\n \"Memory Total : \",\n round(\n torch.cuda.get_device_properties(dev_int).total_memory / 1024 ** 3,\n 1,\n ),\n \"GB, Allocated: \",\n round(torch.cuda.memory_allocated(dev_int) / 1024 ** 3, 1),\n \"GB, Cached: \",\n round(torch.cuda.memory_reserved(dev_int) / 1024 ** 3, 1),\n \"GB\",\n )\n\n print(\n \"Device - Current: %s Count: %d Name: %s Availability: %s\"\n % (\n torch.cuda.current_device(),\n torch.cuda.device_count(),\n torch.cuda.get_device_name(device),\n torch.cuda.is_available(),\n )\n )\n\n if not (optimizer is None):\n # ensuring optimizer is in correct device - https://github.com/pytorch/pytorch/issues/8741\n optimizer.load_state_dict(optimizer.state_dict())\n\n else:\n dev = -1\n device = torch.device(\"cpu\")\n model.cpu()\n amp = False\n print(\"Since Device is CPU, Mixed Precision Training is set to False\")\n\n return model, amp, device\n\n\ndef get_class_imbalance_weights(training_data_loader, parameters):\n \"\"\"\n This function calculates the penalty that is used for validation loss in multi-class problems\n\n Args:\n training_data_loader (torch.utils.data.DataLoader): The training data loader.\n parameters (dict): The parameters passed by the user yaml.\n\n Returns:\n dict: The penalty weights for different classes under consideration.\n \"\"\"\n abs_dict = {} # absolute counts for each class\n weights_dict = {} # average for \"weighted averaging\"\n penalty_dict = None # penalty for misclassification\n # basically, do this for segmentation/classification tasks\n\n if parameters[\"problem_type\"] != \"regression\":\n penalty_dict = {}\n for i in range(0, len(parameters[\"model\"][\"class_list\"])):\n abs_dict[i] = 0\n penalty_dict[i] = 0\n\n penalty_loader = training_data_loader\n\n # get the weights for use for dice loss\n total_counter = 0\n\n # For regression dice penalty need not be taken account\n # For classification this should be calculated on the basis of predicted labels and mask\n # iterate through full penalty data\n for _, (subject) in enumerate(penalty_loader):\n\n # segmentation needs masks to be one-hot encoded\n if parameters[\"problem_type\"] == \"segmentation\":\n # accumulate dice weights for each label\n mask = subject[\"label\"][torchio.DATA]\n one_hot_mask = one_hot(mask, parameters[\"model\"][\"class_list\"])\n for i in range(0, len(parameters[\"model\"][\"class_list\"])):\n currentNumber = torch.nonzero(\n one_hot_mask[:, i, :, :, :], as_tuple=False\n ).size(0)\n # class-specific non-zero voxels\n abs_dict[i] += currentNumber\n # total number of non-zero voxels to be considered\n total_counter += currentNumber\n\n # for classification, the value needs to be used directly\n elif parameters[\"problem_type\"] == \"classification\":\n # accumulate weights for each label\n value_to_predict = subject[\"value_0\"][0]\n for i in range(0, len(parameters[\"model\"][\"class_list\"])):\n if value_to_predict == i:\n abs_dict[i] += 1\n # we only want to increase the counter for those subjects that are defined in the class_list\n total_counter += 1\n\n # Normalize class weights\n weights_dict = {\n key: (val + sys.float_info.epsilon) / total_counter\n for key, val in abs_dict.items()\n }\n\n # get the raw penalty values\n penalty = {\n key: total_counter / (len(abs_dict) * (val + sys.float_info.epsilon))\n for key, val in abs_dict.items()\n }\n # normalize penalty to sum of 1\n penalty_sum = 
np.fromiter(penalty.values(), dtype=np.float64).sum()\n penalty_dict = {\n key: (val + sys.float_info.epsilon) / penalty_sum\n for key, val in penalty.items()\n }\n\n return penalty_dict, weights_dict\n", "path": "GANDLF/utils/tensor.py"}]} | 3,637 | 839 |
gh_patches_debug_2557 | rasdani/github-patches | git_diff | ManimCommunity__manim-235 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
-f broken on windows
Basically the title.
When `-f` is passed on Windows, it opens the video file with the default video application (as `-p` does) instead of showing it in the file explorer.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/__main__.py`
Content:
```
1 import inspect
2 import os
3 import platform
4 import subprocess as sp
5 import sys
6 import re
7 import traceback
8 import importlib.util
9 import types
10
11 from .config import file_writer_config
12 from .scene.scene import Scene
13 from .utils.sounds import play_error_sound
14 from .utils.sounds import play_finish_sound
15 from . import constants
16 from .logger import logger, console
17
18
19 def open_file_if_needed(file_writer):
20 if file_writer_config["quiet"]:
21 curr_stdout = sys.stdout
22 sys.stdout = open(os.devnull, "w")
23
24 open_file = any(
25 [file_writer_config["preview"], file_writer_config["show_file_in_finder"]]
26 )
27 if open_file:
28 current_os = platform.system()
29 file_paths = []
30
31 if file_writer_config["save_last_frame"]:
32 file_paths.append(file_writer.get_image_file_path())
33 if file_writer_config["write_to_movie"]:
34 file_paths.append(file_writer.get_movie_file_path())
35
36 for file_path in file_paths:
37 if current_os == "Windows":
38 os.startfile(file_path)
39 else:
40 commands = []
41 if current_os == "Linux":
42 commands.append("xdg-open")
43 elif current_os.startswith("CYGWIN"):
44 commands.append("cygstart")
45 else: # Assume macOS
46 commands.append("open")
47
48 if file_writer_config["show_file_in_finder"]:
49 commands.append("-R")
50
51 commands.append(file_path)
52
53 # commands.append("-g")
54 FNULL = open(os.devnull, "w")
55 sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)
56 FNULL.close()
57
58 if file_writer_config["quiet"]:
59 sys.stdout.close()
60 sys.stdout = curr_stdout
61
62
63 def is_child_scene(obj, module):
64 return (
65 inspect.isclass(obj)
66 and issubclass(obj, Scene)
67 and obj != Scene
68 and obj.__module__.startswith(module.__name__)
69 )
70
71
72 def prompt_user_for_choice(scene_classes):
73 num_to_class = {}
74 for count, scene_class in enumerate(scene_classes):
75 count += 1 # start with 1 instead of 0
76 name = scene_class.__name__
77 console.print(f"{count}: {name}", style="logging.level.info")
78 num_to_class[count] = scene_class
79 try:
80 user_input = console.input(
81 f"[log.message] {constants.CHOOSE_NUMBER_MESSAGE} [/log.message]"
82 )
83 return [
84 num_to_class[int(num_str)]
85 for num_str in re.split(r"\s*,\s*", user_input.strip())
86 ]
87 except KeyError:
88 logger.error(constants.INVALID_NUMBER_MESSAGE)
89 sys.exit(2)
90 except EOFError:
91 sys.exit(1)
92
93
94 def get_scenes_to_render(scene_classes):
95 if not scene_classes:
96 logger.error(constants.NO_SCENE_MESSAGE)
97 return []
98 if file_writer_config["write_all"]:
99 return scene_classes
100 result = []
101 for scene_name in file_writer_config["scene_names"]:
102 found = False
103 for scene_class in scene_classes:
104 if scene_class.__name__ == scene_name:
105 result.append(scene_class)
106 found = True
107 break
108 if not found and (scene_name != ""):
109 logger.error(constants.SCENE_NOT_FOUND_MESSAGE.format(scene_name))
110 if result:
111 return result
112 return (
113 [scene_classes[0]]
114 if len(scene_classes) == 1
115 else prompt_user_for_choice(scene_classes)
116 )
117
118
119 def get_scene_classes_from_module(module):
120 return [
121 member[1]
122 for member in inspect.getmembers(module, lambda x: is_child_scene(x, module))
123 ]
124
125
126 def get_module(file_name):
127 if file_name == "-":
128 module = types.ModuleType("input_scenes")
129 logger.info(
130 "Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):"
131 )
132 code = sys.stdin.read()
133 if not code.startswith("from manim import"):
134 logger.warn(
135 "Didn't find an import statement for Manim. Importing automatically..."
136 )
137 code = "from manim import *\n" + code
138 logger.info("Rendering animation from typed code...")
139 try:
140 exec(code, module.__dict__)
141 return module
142 except Exception as e:
143 logger.error(f"Failed to render scene: {str(e)}")
144 sys.exit(2)
145 else:
146 if os.path.exists(file_name):
147 if file_name[-3:] != ".py":
148 raise Exception(f"{file_name} is not a valid Manim python script.")
149 module_name = file_name[:-3].replace(os.sep, ".").split(".")[-1]
150 spec = importlib.util.spec_from_file_location(module_name, file_name)
151 module = importlib.util.module_from_spec(spec)
152 spec.loader.exec_module(module)
153 return module
154 else:
155 raise FileNotFoundError(f"{file_name} not found")
156
157
158 def main():
159 module = get_module(file_writer_config["input_file"])
160 all_scene_classes = get_scene_classes_from_module(module)
161 scene_classes_to_render = get_scenes_to_render(all_scene_classes)
162 sound_on = file_writer_config["sound"]
163 for SceneClass in scene_classes_to_render:
164 try:
165 # By invoking, this renders the full scene
166 scene = SceneClass()
167 open_file_if_needed(scene.file_writer)
168 if sound_on:
169 play_finish_sound()
170 except Exception:
171 print("\n\n")
172 traceback.print_exc()
173 print("\n\n")
174 if sound_on:
175 play_error_sound()
176
177
178 if __name__ == "__main__":
179 main()
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/__main__.py b/manim/__main__.py
--- a/manim/__main__.py
+++ b/manim/__main__.py
@@ -36,7 +36,7 @@
for file_path in file_paths:
if current_os == "Windows":
- os.startfile(file_path)
+ os.startfile(os.path.dirname(file_path))
else:
commands = []
if current_os == "Linux":
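The essence of the fix, revealing the containing folder rather than launching the file, can be sketched in isolation as below. This is a standalone illustration with a hypothetical path, not Manim's actual `open_file_if_needed`:

```python
# Standalone sketch of the post-patch "-f" behaviour; the example path is hypothetical.
import os
import platform
import subprocess as sp


def reveal_in_file_manager(file_path: str) -> None:
    current_os = platform.system()
    if current_os == "Windows":
        # Open the containing folder in Explorer instead of playing the video.
        os.startfile(os.path.dirname(file_path))
    elif current_os == "Linux":
        sp.call(["xdg-open", os.path.dirname(file_path)])
    else:  # assume macOS
        sp.call(["open", "-R", file_path])  # -R reveals the file in Finder


reveal_in_file_manager(os.path.join("media", "videos", "scene", "1080p60", "Scene.mp4"))
```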
| {"golden_diff": "diff --git a/manim/__main__.py b/manim/__main__.py\n--- a/manim/__main__.py\n+++ b/manim/__main__.py\n@@ -36,7 +36,7 @@\n \n for file_path in file_paths:\n if current_os == \"Windows\":\n- os.startfile(file_path)\n+ os.startfile(os.path.dirname(file_path))\n else:\n commands = []\n if current_os == \"Linux\":\n", "issue": "-f broken on windows\nBasically the title.\r\nWhen passing -f on windows in show the video file with the default video browser (like -p does) and not in the file explorer. \n", "before_files": [{"content": "import inspect\nimport os\nimport platform\nimport subprocess as sp\nimport sys\nimport re\nimport traceback\nimport importlib.util\nimport types\n\nfrom .config import file_writer_config\nfrom .scene.scene import Scene\nfrom .utils.sounds import play_error_sound\nfrom .utils.sounds import play_finish_sound\nfrom . import constants\nfrom .logger import logger, console\n\n\ndef open_file_if_needed(file_writer):\n if file_writer_config[\"quiet\"]:\n curr_stdout = sys.stdout\n sys.stdout = open(os.devnull, \"w\")\n\n open_file = any(\n [file_writer_config[\"preview\"], file_writer_config[\"show_file_in_finder\"]]\n )\n if open_file:\n current_os = platform.system()\n file_paths = []\n\n if file_writer_config[\"save_last_frame\"]:\n file_paths.append(file_writer.get_image_file_path())\n if file_writer_config[\"write_to_movie\"]:\n file_paths.append(file_writer.get_movie_file_path())\n\n for file_path in file_paths:\n if current_os == \"Windows\":\n os.startfile(file_path)\n else:\n commands = []\n if current_os == \"Linux\":\n commands.append(\"xdg-open\")\n elif current_os.startswith(\"CYGWIN\"):\n commands.append(\"cygstart\")\n else: # Assume macOS\n commands.append(\"open\")\n\n if file_writer_config[\"show_file_in_finder\"]:\n commands.append(\"-R\")\n\n commands.append(file_path)\n\n # commands.append(\"-g\")\n FNULL = open(os.devnull, \"w\")\n sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)\n FNULL.close()\n\n if file_writer_config[\"quiet\"]:\n sys.stdout.close()\n sys.stdout = curr_stdout\n\n\ndef is_child_scene(obj, module):\n return (\n inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__)\n )\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n console.print(f\"{count}: {name}\", style=\"logging.level.info\")\n num_to_class[count] = scene_class\n try:\n user_input = console.input(\n f\"[log.message] {constants.CHOOSE_NUMBER_MESSAGE} [/log.message]\"\n )\n return [\n num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())\n ]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if file_writer_config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in file_writer_config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(constants.SCENE_NOT_FOUND_MESSAGE.format(scene_name))\n if result:\n return result\n return (\n [scene_classes[0]]\n if len(scene_classes) == 1\n else prompt_user_for_choice(scene_classes)\n )\n\n\ndef 
get_scene_classes_from_module(module):\n return [\n member[1]\n for member in inspect.getmembers(module, lambda x: is_child_scene(x, module))\n ]\n\n\ndef get_module(file_name):\n if file_name == \"-\":\n module = types.ModuleType(\"input_scenes\")\n logger.info(\n \"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):\"\n )\n code = sys.stdin.read()\n if not code.startswith(\"from manim import\"):\n logger.warn(\n \"Didn't find an import statement for Manim. Importing automatically...\"\n )\n code = \"from manim import *\\n\" + code\n logger.info(\"Rendering animation from typed code...\")\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if os.path.exists(file_name):\n if file_name[-3:] != \".py\":\n raise Exception(f\"{file_name} is not a valid Manim python script.\")\n module_name = file_name[:-3].replace(os.sep, \".\").split(\".\")[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f\"{file_name} not found\")\n\n\ndef main():\n module = get_module(file_writer_config[\"input_file\"])\n all_scene_classes = get_scene_classes_from_module(module)\n scene_classes_to_render = get_scenes_to_render(all_scene_classes)\n sound_on = file_writer_config[\"sound\"]\n for SceneClass in scene_classes_to_render:\n try:\n # By invoking, this renders the full scene\n scene = SceneClass()\n open_file_if_needed(scene.file_writer)\n if sound_on:\n play_finish_sound()\n except Exception:\n print(\"\\n\\n\")\n traceback.print_exc()\n print(\"\\n\\n\")\n if sound_on:\n play_error_sound()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manim/__main__.py"}], "after_files": [{"content": "import inspect\nimport os\nimport platform\nimport subprocess as sp\nimport sys\nimport re\nimport traceback\nimport importlib.util\nimport types\n\nfrom .config import file_writer_config\nfrom .scene.scene import Scene\nfrom .utils.sounds import play_error_sound\nfrom .utils.sounds import play_finish_sound\nfrom . 
import constants\nfrom .logger import logger,console\n\n\ndef open_file_if_needed(file_writer):\n if file_writer_config[\"quiet\"]:\n curr_stdout = sys.stdout\n sys.stdout = open(os.devnull, \"w\")\n\n open_file = any([\n file_writer_config[\"preview\"],\n file_writer_config[\"show_file_in_finder\"]\n ])\n if open_file:\n current_os = platform.system()\n file_paths = []\n\n if file_writer_config[\"save_last_frame\"]:\n file_paths.append(file_writer.get_image_file_path())\n if file_writer_config[\"write_to_movie\"]:\n file_paths.append(file_writer.get_movie_file_path())\n\n for file_path in file_paths:\n if current_os == \"Windows\":\n os.startfile(os.path.dirname(file_path))\n else:\n commands = []\n if current_os == \"Linux\":\n commands.append(\"xdg-open\")\n elif current_os.startswith(\"CYGWIN\"):\n commands.append(\"cygstart\")\n else: # Assume macOS\n commands.append(\"open\")\n\n if file_writer_config[\"show_file_in_finder\"]:\n commands.append(\"-R\")\n\n commands.append(file_path)\n\n # commands.append(\"-g\")\n FNULL = open(os.devnull, 'w')\n sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)\n FNULL.close()\n\n if file_writer_config[\"quiet\"]:\n sys.stdout.close()\n sys.stdout = curr_stdout\n\n\ndef is_child_scene(obj, module):\n return (inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__))\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n console.print(f\"{count}: {name}\", style=\"logging.level.info\")\n num_to_class[count] = scene_class\n try:\n user_input = console.input(f\"[log.message] {constants.CHOOSE_NUMBER_MESSAGE} [/log.message]\")\n return [num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if file_writer_config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in file_writer_config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(\n constants.SCENE_NOT_FOUND_MESSAGE.format(\n scene_name\n )\n )\n if result:\n return result\n return [scene_classes[0]] if len(scene_classes) == 1 else prompt_user_for_choice(scene_classes)\n\n\ndef get_scene_classes_from_module(module):\n return [\n member[1]\n for member in inspect.getmembers(\n module,\n lambda x: is_child_scene(x, module)\n )\n ]\n\n\ndef get_module(file_name):\n if file_name == \"-\":\n module = types.ModuleType(\"input_scenes\")\n logger.info(\"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):\")\n code = sys.stdin.read()\n if not code.startswith(\"from manim import\"):\n logger.warn(\"Didn't find an import statement for Manim. 
Importing automatically...\")\n code=\"from manim import *\\n\"+code\n logger.info(\"Rendering animation from typed code...\")\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if os.path.exists(file_name):\n if file_name[-3:] != \".py\":\n raise Exception(f\"{file_name} is not a valid Manim python script.\")\n module_name = file_name[:-3].replace(os.sep, '.').split('.')[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f'{file_name} not found')\n\n\ndef main():\n module = get_module(file_writer_config[\"input_file\"])\n all_scene_classes = get_scene_classes_from_module(module)\n scene_classes_to_render = get_scenes_to_render(all_scene_classes)\n sound_on = file_writer_config[\"sound\"]\n for SceneClass in scene_classes_to_render:\n try:\n # By invoking, this renders the full scene\n scene = SceneClass()\n open_file_if_needed(scene.file_writer)\n if sound_on:\n play_finish_sound()\n except Exception:\n print(\"\\n\\n\")\n traceback.print_exc()\n print(\"\\n\\n\")\n if sound_on:\n play_error_sound()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manim/__main__.py"}]} | 1,934 | 99 |
gh_patches_debug_40384 | rasdani/github-patches | git_diff | carpentries__amy-85 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Paginate display of all sites
The display of sites should be broken into pages, and each page should have navigation links to jump to other pages.
--- END ISSUE ---
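For orientation, here is a minimal sketch of the pagination flow the issue above is asking for, built on the same `django.core.paginator` API that the `all_events` view in the file below already uses. The helper name `paginate_queryset` and its fallback behaviour are illustrative assumptions, not the repository's final code:

```python
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def paginate_queryset(queryset, items_per_page, page_number):
    """Return one page of results, falling back to the first or last
    page when the requested page number is missing or out of range."""
    paginator = Paginator(queryset, items_per_page)
    try:
        return paginator.page(page_number)
    except PageNotAnInteger:
        # No page number (or a non-integer one) was supplied: show page 1.
        return paginator.page(1)
    except EmptyPage:
        # Page number beyond the last page: show the final page instead.
        return paginator.page(paginator.num_pages)
```

A template can then render the navigation links mentioned in the issue from the returned page object's `has_previous`/`has_next` helpers and `paginator.page_range`.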
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `workshops/views.py`
Content:
```
1 import yaml
2
3 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
4 from django.core.urlresolvers import reverse
5 from django.http import HttpResponseRedirect
6 from django.shortcuts import render, get_object_or_404
7 from django.views.generic.edit import CreateView, UpdateView
8 from django.db.models import Count
9
10 from workshops.models import Site, Airport, Event, Person, Task, Cohort, Skill, Trainee, Badge
11 from workshops.forms import InstructorMatchForm
12 from workshops.util import earth_distance
13
14 #------------------------------------------------------------
15
16 ITEMS_PER_PAGE = 25
17
18 #------------------------------------------------------------
19
20 def index(request):
21 '''Home page.'''
22 upcoming_events = Event.objects.upcoming_events()
23 context = {'title' : 'Home Page',
24 'upcoming_events' : upcoming_events}
25 return render(request, 'workshops/index.html', context)
26
27 #------------------------------------------------------------
28
29 SITE_FIELDS = ['domain', 'fullname', 'country', 'notes']
30
31 def all_sites(request):
32 '''List all sites.'''
33 all_sites = Site.objects.order_by('domain')
34 user_can_add = request.user.has_perm('edit')
35 context = {'title' : 'All Sites',
36 'all_sites' : all_sites,
37 'user_can_add' : user_can_add}
38 return render(request, 'workshops/all_sites.html', context)
39
40 def site_details(request, site_domain):
41 '''List details of a particular site.'''
42 site = Site.objects.get(domain=site_domain)
43 events = Event.objects.filter(site=site)
44 context = {'title' : 'Site {0}'.format(site),
45 'site' : site,
46 'events' : events}
47 return render(request, 'workshops/site.html', context)
48
49 class SiteCreate(CreateView):
50 model = Site
51 fields = SITE_FIELDS
52
53 class SiteUpdate(UpdateView):
54 model = Site
55 fields = SITE_FIELDS
56 slug_field = 'domain'
57 slug_url_kwarg = 'site_domain'
58
59 #------------------------------------------------------------
60
61 AIRPORT_FIELDS = ['iata', 'fullname', 'country', 'latitude', 'longitude']
62
63 def all_airports(request):
64 '''List all airports.'''
65 all_airports = Airport.objects.order_by('iata')
66 user_can_add = request.user.has_perm('edit')
67 context = {'title' : 'All Airports',
68 'all_airports' : all_airports,
69 'user_can_add' : user_can_add}
70 return render(request, 'workshops/all_airports.html', context)
71
72 def airport_details(request, airport_iata):
73 '''List details of a particular airport.'''
74 airport = Airport.objects.get(iata=airport_iata)
75 context = {'title' : 'Airport {0}'.format(airport),
76 'airport' : airport}
77 return render(request, 'workshops/airport.html', context)
78
79 class AirportCreate(CreateView):
80 model = Airport
81 fields = AIRPORT_FIELDS
82
83 class AirportUpdate(UpdateView):
84 model = Airport
85 fields = AIRPORT_FIELDS
86 slug_field = 'iata'
87 slug_url_kwarg = 'airport_iata'
88
89 #------------------------------------------------------------
90
91 def all_persons(request):
92 '''List all persons.'''
93 all_persons = Person.objects.order_by('family', 'personal')
94 context = {'title' : 'All Persons',
95 'all_persons' : all_persons}
96 return render(request, 'workshops/all_persons.html', context)
97
98 def person_details(request, person_id):
99 '''List details of a particular person.'''
100 person = Person.objects.get(id=person_id)
101 context = {'title' : 'Person {0}'.format(person),
102 'person' : person}
103 return render(request, 'workshops/person.html', context)
104
105 #------------------------------------------------------------
106
107 def all_events(request):
108 '''List all events.'''
109
110 all_events = Event.objects.order_by('slug')
111
112 # Get the number of items requested per page, default to 25
113 # This is important for unit testing, we need to be
114 # able to specify how many items to expect
115 items = request.GET.get('items_per_page', ITEMS_PER_PAGE)
116
117 # Only paginate if the number of items is not 'all'
118 if not items == 'all':
119
120 # If items is not an integer, set it to the default
121 try:
122 items = int(items)
123 except ValueError:
124 items = ITEMS_PER_PAGE
125
126 events_paginator = Paginator(all_events, items)
127
128 # Get the page number requested, if any
129 page = request.GET.get('page')
130
131 try:
132 events = events_paginator.page(page)
133 except PageNotAnInteger:
134 # If page is not an integer, deliver first page.
135 events = events_paginator.page(1)
136 except EmptyPage:
137 # If page is out of range (e.g. 9999), deliver last page of results.
138 events = events_paginator.page(events_paginator.num_pages)
139 else:
140 events = all_events
141
142 context = {'title' : 'All Events',
143 'all_events' : events}
144
145 return render(request, 'workshops/all_events.html', context)
146
147 def event_details(request, event_slug):
148 '''List details of a particular event.'''
149 event = Event.objects.get(slug=event_slug)
150 context = {'title' : 'Event {0}'.format(event),
151 'event' : event}
152 return render(request, 'workshops/event.html', context)
153
154 #------------------------------------------------------------
155
156 TASK_FIELDS = ['event', 'person', 'role']
157
158 def all_tasks(request):
159 '''List all tasks.'''
160 all_tasks = Task.objects.order_by('event', 'person', 'role')
161 user_can_add = request.user.has_perm('edit')
162 context = {'title' : 'All Tasks',
163 'all_tasks' : all_tasks,
164 'user_can_add' : user_can_add}
165 return render(request, 'workshops/all_tasks.html', context)
166
167 def task_details(request, event_slug, person_id, role_name):
168 '''List details of a particular task.'''
169 task = Task.objects.get(event__slug=event_slug, person__id=person_id, role__name=role_name)
170 context = {'title' : 'Task {0}'.format(task),
171 'task' : task}
172 return render(request, 'workshops/task.html', context)
173
174 class TaskCreate(CreateView):
175 model = Task
176 fields = TASK_FIELDS
177
178 class TaskUpdate(UpdateView):
179 model = Task
180 fields = TASK_FIELDS
181 pk_url_kwarg = 'task_id'
182
183 def get_object(self):
184 """
185 Returns the object the view is displaying.
186 """
187
188 event_slug = self.kwargs.get('event_slug', None)
189 person_id = self.kwargs.get('person_id', None)
190 role_name = self.kwargs.get('role_name', None)
191
192 return get_object_or_404(Task, event__slug=event_slug, person__id=person_id, role__name=role_name)
193
194 #------------------------------------------------------------
195
196 COHORT_FIELDS = ['name', 'start', 'active', 'venue', 'qualifies']
197
198 def all_cohorts(request):
199 '''List all cohorts.'''
200 all_cohorts = Cohort.objects.order_by('start')
201 user_can_add = request.user.has_perm('edit')
202 context = {'title' : 'All Cohorts',
203 'all_cohorts' : all_cohorts,
204 'user_can_add' : user_can_add}
205 return render(request, 'workshops/all_cohorts.html', context)
206
207 def cohort_details(request, cohort_name):
208 '''List details of a particular cohort.'''
209 cohort = Cohort.objects.get(name=cohort_name)
210 trainees = Trainee.objects.filter(cohort_id=cohort.id)
211 context = {'title' : 'Cohort {0}'.format(cohort),
212 'cohort' : cohort,
213 'trainees' : trainees}
214 return render(request, 'workshops/cohort.html', context)
215
216 class CohortCreate(CreateView):
217 model = Cohort
218 fields = COHORT_FIELDS
219
220 class CohortUpdate(UpdateView):
221 model = Cohort
222 fields = COHORT_FIELDS
223 slug_field = 'name'
224 slug_url_kwarg = 'cohort_name'
225
226 #------------------------------------------------------------
227
228 def match(request):
229 persons = None
230
231 if request.method == 'POST':
232 form = InstructorMatchForm(request.POST)
233 if form.is_valid():
234
235 # Filter by skills.
236 persons = Person.objects.filter(airport__isnull=False)
237 skills = []
238 for s in Skill.objects.all():
239 if form.cleaned_data[s.name]:
240 skills.append(s)
241 persons = persons.have_skills(skills)
242
243 # Sort by location.
244 loc = (float(form.cleaned_data['latitude']),
245 float(form.cleaned_data['longitude']))
246 persons = [(earth_distance(loc, (p.airport.latitude, p.airport.longitude)), p)
247 for p in persons]
248 persons.sort()
249 persons = [x[1] for x in persons[:10]]
250
251 else:
252 pass # FIXME: error message
253
254 # if a GET (or any other method) we'll create a blank form
255 else:
256 form = InstructorMatchForm()
257
258 return render(request, 'workshops/match.html', {'form': form, 'persons' : persons})
259
260 #------------------------------------------------------------
261
262 def _export_badges():
263 '''Collect badge data as YAML.'''
264 result = {}
265 for badge in Badge.objects.all():
266 persons = Person.objects.filter(award__badge_id=badge.id)
267 result[badge.name] = [{"user" : p.slug, "name" : p.fullname()} for p in persons]
268 return result
269
270 def _export_instructors():
271 '''Collect instructor airport locations as YAML.'''
272 # Exclude airports with no instructors, and add the number of instructors per airport
273 airports = Airport.objects.exclude(person=None).annotate(num_persons=Count('person'))
274 return [{'airport' : str(a.fullname),
275 'latlng' : '{0},{1}'.format(a.latitude, a.longitude),
276 'count' : a.num_persons}
277 for a in airports]
278
279 def export(request, name):
280 '''Export data as YAML for inclusion in main web site.'''
281 data = None
282 if name == 'badges':
283 title, data = 'Badges', _export_badges()
284 elif name == 'instructors':
285 title, data = 'Instructor Locations', _export_instructors()
286 else:
287 title, data = 'Error', None # FIXME - need an error message
288 context = {'title' : title,
289 'data' : data}
290 return render(request, 'workshops/export.html', context)
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/workshops/views.py b/workshops/views.py
--- a/workshops/views.py
+++ b/workshops/views.py
@@ -30,10 +30,33 @@
def all_sites(request):
'''List all sites.'''
+
all_sites = Site.objects.order_by('domain')
+ items, page = _get_pagination_items(request)
+
+ # Show everything.
+ if items == 'all':
+ sites = all_sites
+
+ # Show selected items.
+ else:
+ sites_paginator = Paginator(all_sites, items)
+
+ # Select the sites.
+ try:
+ sites = sites_paginator.page(page)
+
+ # If page is not an integer, deliver first page.
+ except PageNotAnInteger:
+ sites = sites_paginator.page(1)
+
+ # If page is out of range, deliver last page of results.
+ except EmptyPage:
+ sites = sites_paginator.page(sites_paginator.num_pages)
+
user_can_add = request.user.has_perm('edit')
context = {'title' : 'All Sites',
- 'all_sites' : all_sites,
+ 'all_sites' : sites,
'user_can_add' : user_can_add}
return render(request, 'workshops/all_sites.html', context)
@@ -108,36 +131,27 @@
'''List all events.'''
all_events = Event.objects.order_by('slug')
+ items, page = _get_pagination_items(request)
- # Get the number of items requested per page, default to 25
- # This is important for unit testing, we need to be
- # able to specify how many items to expect
- items = request.GET.get('items_per_page', ITEMS_PER_PAGE)
-
- # Only paginate if the number of items is not 'all'
- if not items == 'all':
-
- # If items is not an integer, set it to the default
- try:
- items = int(items)
- except ValueError:
- items = ITEMS_PER_PAGE
+ # Show everything.
+ if items == 'all':
+ events = all_events
+ # Show selected items.
+ else:
events_paginator = Paginator(all_events, items)
- # Get the page number requested, if any
- page = request.GET.get('page')
-
+ # Select the events.
try:
events = events_paginator.page(page)
+
+ # If page is not an integer, deliver first page.
except PageNotAnInteger:
- # If page is not an integer, deliver first page.
events = events_paginator.page(1)
+
+ # If page is out of range, deliver last page of results.
except EmptyPage:
- # If page is out of range (e.g. 9999), deliver last page of results.
events = events_paginator.page(events_paginator.num_pages)
- else:
- events = all_events
context = {'title' : 'All Events',
'all_events' : events}
@@ -288,3 +302,20 @@
context = {'title' : title,
'data' : data}
return render(request, 'workshops/export.html', context)
+
+#------------------------------------------------------------
+
+def _get_pagination_items(request):
+ '''Determine how much pagination to do.'''
+
+ items = request.GET.get('items_per_page', ITEMS_PER_PAGE)
+
+ if items != 'all':
+ try:
+ items = int(items)
+ except ValueError:
+ items = ITEMS_PER_PAGE
+
+ page = request.GET.get('page')
+
+ return items, page
| {"golden_diff": "diff --git a/workshops/views.py b/workshops/views.py\n--- a/workshops/views.py\n+++ b/workshops/views.py\n@@ -30,10 +30,33 @@\n \n def all_sites(request):\n '''List all sites.'''\n+\n all_sites = Site.objects.order_by('domain')\n+ items, page = _get_pagination_items(request)\n+\n+ # Show everything.\n+ if items == 'all':\n+ sites = all_sites\n+\n+ # Show selected items.\n+ else:\n+ sites_paginator = Paginator(all_sites, items)\n+\n+ # Select the sites.\n+ try:\n+ sites = sites_paginator.page(page)\n+\n+ # If page is not an integer, deliver first page.\n+ except PageNotAnInteger:\n+ sites = sites_paginator.page(1)\n+\n+ # If page is out of range, deliver last page of results.\n+ except EmptyPage:\n+ sites = sites_paginator.page(sites_paginator.num_pages)\n+\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Sites',\n- 'all_sites' : all_sites,\n+ 'all_sites' : sites,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_sites.html', context)\n \n@@ -108,36 +131,27 @@\n '''List all events.'''\n \n all_events = Event.objects.order_by('slug')\n+ items, page = _get_pagination_items(request)\n \n- # Get the number of items requested per page, default to 25\n- # This is important for unit testing, we need to be\n- # able to specify how many items to expect\n- items = request.GET.get('items_per_page', ITEMS_PER_PAGE)\n-\n- # Only paginate if the number of items is not 'all'\n- if not items == 'all':\n-\n- # If items is not an integer, set it to the default\n- try:\n- items = int(items)\n- except ValueError:\n- items = ITEMS_PER_PAGE\n+ # Show everything.\n+ if items == 'all':\n+ events = all_events\n \n+ # Show selected items.\n+ else:\n events_paginator = Paginator(all_events, items)\n \n- # Get the page number requested, if any\n- page = request.GET.get('page')\n-\n+ # Select the events.\n try:\n events = events_paginator.page(page)\n+\n+ # If page is not an integer, deliver first page.\n except PageNotAnInteger:\n- # If page is not an integer, deliver first page.\n events = events_paginator.page(1)\n+\n+ # If page is out of range, deliver last page of results.\n except EmptyPage:\n- # If page is out of range (e.g. 
9999), deliver last page of results.\n events = events_paginator.page(events_paginator.num_pages)\n- else:\n- events = all_events\n \n context = {'title' : 'All Events',\n 'all_events' : events}\n@@ -288,3 +302,20 @@\n context = {'title' : title,\n 'data' : data}\n return render(request, 'workshops/export.html', context)\n+\n+#------------------------------------------------------------\n+\n+def _get_pagination_items(request):\n+ '''Determine how much pagination to do.'''\n+\n+ items = request.GET.get('items_per_page', ITEMS_PER_PAGE)\n+\n+ if items != 'all':\n+ try:\n+ items = int(items)\n+ except ValueError:\n+ items = ITEMS_PER_PAGE\n+\n+ page = request.GET.get('page')\n+\n+ return items, page\n", "issue": "Paginate display of all sites\nThe display of sites should be broken into pages, and each page should have navigation links to jump to other pages.\n\n", "before_files": [{"content": "import yaml\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom django.db.models import Count\n\nfrom workshops.models import Site, Airport, Event, Person, Task, Cohort, Skill, Trainee, Badge\nfrom workshops.forms import InstructorMatchForm\nfrom workshops.util import earth_distance\n\n#------------------------------------------------------------\n\nITEMS_PER_PAGE = 25\n\n#------------------------------------------------------------\n\ndef index(request):\n '''Home page.'''\n upcoming_events = Event.objects.upcoming_events()\n context = {'title' : 'Home Page',\n 'upcoming_events' : upcoming_events}\n return render(request, 'workshops/index.html', context)\n\n#------------------------------------------------------------\n\nSITE_FIELDS = ['domain', 'fullname', 'country', 'notes']\n\ndef all_sites(request):\n '''List all sites.'''\n all_sites = Site.objects.order_by('domain')\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Sites',\n 'all_sites' : all_sites,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_sites.html', context)\n\ndef site_details(request, site_domain):\n '''List details of a particular site.'''\n site = Site.objects.get(domain=site_domain)\n events = Event.objects.filter(site=site)\n context = {'title' : 'Site {0}'.format(site),\n 'site' : site,\n 'events' : events}\n return render(request, 'workshops/site.html', context)\n\nclass SiteCreate(CreateView):\n model = Site\n fields = SITE_FIELDS\n\nclass SiteUpdate(UpdateView):\n model = Site\n fields = SITE_FIELDS\n slug_field = 'domain'\n slug_url_kwarg = 'site_domain'\n\n#------------------------------------------------------------\n\nAIRPORT_FIELDS = ['iata', 'fullname', 'country', 'latitude', 'longitude']\n\ndef all_airports(request):\n '''List all airports.'''\n all_airports = Airport.objects.order_by('iata')\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Airports',\n 'all_airports' : all_airports,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_airports.html', context)\n\ndef airport_details(request, airport_iata):\n '''List details of a particular airport.'''\n airport = Airport.objects.get(iata=airport_iata)\n context = {'title' : 'Airport {0}'.format(airport),\n 'airport' : airport}\n return render(request, 'workshops/airport.html', context)\n\nclass AirportCreate(CreateView):\n model = Airport\n 
fields = AIRPORT_FIELDS\n\nclass AirportUpdate(UpdateView):\n model = Airport\n fields = AIRPORT_FIELDS\n slug_field = 'iata'\n slug_url_kwarg = 'airport_iata'\n\n#------------------------------------------------------------\n\ndef all_persons(request):\n '''List all persons.'''\n all_persons = Person.objects.order_by('family', 'personal')\n context = {'title' : 'All Persons',\n 'all_persons' : all_persons}\n return render(request, 'workshops/all_persons.html', context)\n\ndef person_details(request, person_id):\n '''List details of a particular person.'''\n person = Person.objects.get(id=person_id)\n context = {'title' : 'Person {0}'.format(person),\n 'person' : person}\n return render(request, 'workshops/person.html', context)\n\n#------------------------------------------------------------\n\ndef all_events(request):\n '''List all events.'''\n\n all_events = Event.objects.order_by('slug')\n\n # Get the number of items requested per page, default to 25\n # This is important for unit testing, we need to be\n # able to specify how many items to expect\n items = request.GET.get('items_per_page', ITEMS_PER_PAGE)\n\n # Only paginate if the number of items is not 'all'\n if not items == 'all':\n\n # If items is not an integer, set it to the default\n try:\n items = int(items)\n except ValueError:\n items = ITEMS_PER_PAGE\n\n events_paginator = Paginator(all_events, items)\n\n # Get the page number requested, if any\n page = request.GET.get('page')\n\n try:\n events = events_paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n events = events_paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n events = events_paginator.page(events_paginator.num_pages)\n else:\n events = all_events\n\n context = {'title' : 'All Events',\n 'all_events' : events}\n\n return render(request, 'workshops/all_events.html', context)\n\ndef event_details(request, event_slug):\n '''List details of a particular event.'''\n event = Event.objects.get(slug=event_slug)\n context = {'title' : 'Event {0}'.format(event),\n 'event' : event}\n return render(request, 'workshops/event.html', context)\n\n#------------------------------------------------------------\n\nTASK_FIELDS = ['event', 'person', 'role']\n\ndef all_tasks(request):\n '''List all tasks.'''\n all_tasks = Task.objects.order_by('event', 'person', 'role')\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Tasks',\n 'all_tasks' : all_tasks,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_tasks.html', context)\n\ndef task_details(request, event_slug, person_id, role_name):\n '''List details of a particular task.'''\n task = Task.objects.get(event__slug=event_slug, person__id=person_id, role__name=role_name)\n context = {'title' : 'Task {0}'.format(task),\n 'task' : task}\n return render(request, 'workshops/task.html', context)\n\nclass TaskCreate(CreateView):\n model = Task\n fields = TASK_FIELDS\n\nclass TaskUpdate(UpdateView):\n model = Task\n fields = TASK_FIELDS\n pk_url_kwarg = 'task_id'\n\n def get_object(self):\n \"\"\"\n Returns the object the view is displaying.\n \"\"\"\n\n event_slug = self.kwargs.get('event_slug', None)\n person_id = self.kwargs.get('person_id', None)\n role_name = self.kwargs.get('role_name', None)\n\n return get_object_or_404(Task, event__slug=event_slug, person__id=person_id, role__name=role_name)\n\n#------------------------------------------------------------\n\nCOHORT_FIELDS = 
['name', 'start', 'active', 'venue', 'qualifies']\n\ndef all_cohorts(request):\n '''List all cohorts.'''\n all_cohorts = Cohort.objects.order_by('start')\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Cohorts',\n 'all_cohorts' : all_cohorts,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_cohorts.html', context)\n\ndef cohort_details(request, cohort_name):\n '''List details of a particular cohort.'''\n cohort = Cohort.objects.get(name=cohort_name)\n trainees = Trainee.objects.filter(cohort_id=cohort.id)\n context = {'title' : 'Cohort {0}'.format(cohort),\n 'cohort' : cohort,\n 'trainees' : trainees}\n return render(request, 'workshops/cohort.html', context)\n\nclass CohortCreate(CreateView):\n model = Cohort\n fields = COHORT_FIELDS\n\nclass CohortUpdate(UpdateView):\n model = Cohort\n fields = COHORT_FIELDS\n slug_field = 'name'\n slug_url_kwarg = 'cohort_name'\n\n#------------------------------------------------------------\n\ndef match(request):\n persons = None\n\n if request.method == 'POST':\n form = InstructorMatchForm(request.POST)\n if form.is_valid():\n\n # Filter by skills.\n persons = Person.objects.filter(airport__isnull=False)\n skills = []\n for s in Skill.objects.all():\n if form.cleaned_data[s.name]:\n skills.append(s)\n persons = persons.have_skills(skills)\n\n # Sort by location.\n loc = (float(form.cleaned_data['latitude']),\n float(form.cleaned_data['longitude']))\n persons = [(earth_distance(loc, (p.airport.latitude, p.airport.longitude)), p)\n for p in persons]\n persons.sort()\n persons = [x[1] for x in persons[:10]]\n\n else:\n pass # FIXME: error message\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = InstructorMatchForm()\n\n return render(request, 'workshops/match.html', {'form': form, 'persons' : persons})\n\n#------------------------------------------------------------\n\ndef _export_badges():\n '''Collect badge data as YAML.'''\n result = {}\n for badge in Badge.objects.all():\n persons = Person.objects.filter(award__badge_id=badge.id)\n result[badge.name] = [{\"user\" : p.slug, \"name\" : p.fullname()} for p in persons]\n return result\n\ndef _export_instructors():\n '''Collect instructor airport locations as YAML.'''\n # Exclude airports with no instructors, and add the number of instructors per airport\n airports = Airport.objects.exclude(person=None).annotate(num_persons=Count('person'))\n return [{'airport' : str(a.fullname),\n 'latlng' : '{0},{1}'.format(a.latitude, a.longitude),\n 'count' : a.num_persons}\n for a in airports]\n\ndef export(request, name):\n '''Export data as YAML for inclusion in main web site.'''\n data = None\n if name == 'badges':\n title, data = 'Badges', _export_badges()\n elif name == 'instructors':\n title, data = 'Instructor Locations', _export_instructors()\n else:\n title, data = 'Error', None # FIXME - need an error message\n context = {'title' : title,\n 'data' : data}\n return render(request, 'workshops/export.html', context)\n", "path": "workshops/views.py"}], "after_files": [{"content": "import yaml\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom django.db.models import Count\n\nfrom workshops.models import Site, Airport, Event, Person, Task, Cohort, Skill, Trainee, Badge\nfrom workshops.forms 
import InstructorMatchForm\nfrom workshops.util import earth_distance\n\n#------------------------------------------------------------\n\nITEMS_PER_PAGE = 25\n\n#------------------------------------------------------------\n\ndef index(request):\n '''Home page.'''\n upcoming_events = Event.objects.upcoming_events()\n context = {'title' : 'Home Page',\n 'upcoming_events' : upcoming_events}\n return render(request, 'workshops/index.html', context)\n\n#------------------------------------------------------------\n\nSITE_FIELDS = ['domain', 'fullname', 'country', 'notes']\n\ndef all_sites(request):\n '''List all sites.'''\n\n all_sites = Site.objects.order_by('domain')\n items, page = _get_pagination_items(request)\n\n # Show everything.\n if items == 'all':\n sites = all_sites\n\n # Show selected items.\n else:\n sites_paginator = Paginator(all_sites, items)\n\n # Select the sites.\n try:\n sites = sites_paginator.page(page)\n\n # If page is not an integer, deliver first page.\n except PageNotAnInteger:\n sites = sites_paginator.page(1)\n\n # If page is out of range, deliver last page of results.\n except EmptyPage:\n sites = sites_paginator.page(sites_paginator.num_pages)\n\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Sites',\n 'all_sites' : sites,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_sites.html', context)\n\ndef site_details(request, site_domain):\n '''List details of a particular site.'''\n site = Site.objects.get(domain=site_domain)\n events = Event.objects.filter(site=site)\n context = {'title' : 'Site {0}'.format(site),\n 'site' : site,\n 'events' : events}\n return render(request, 'workshops/site.html', context)\n\nclass SiteCreate(CreateView):\n model = Site\n fields = SITE_FIELDS\n\nclass SiteUpdate(UpdateView):\n model = Site\n fields = SITE_FIELDS\n slug_field = 'domain'\n slug_url_kwarg = 'site_domain'\n\n#------------------------------------------------------------\n\nAIRPORT_FIELDS = ['iata', 'fullname', 'country', 'latitude', 'longitude']\n\ndef all_airports(request):\n '''List all airports.'''\n all_airports = Airport.objects.order_by('iata')\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Airports',\n 'all_airports' : all_airports,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_airports.html', context)\n\ndef airport_details(request, airport_iata):\n '''List details of a particular airport.'''\n airport = Airport.objects.get(iata=airport_iata)\n context = {'title' : 'Airport {0}'.format(airport),\n 'airport' : airport}\n return render(request, 'workshops/airport.html', context)\n\nclass AirportCreate(CreateView):\n model = Airport\n fields = AIRPORT_FIELDS\n\nclass AirportUpdate(UpdateView):\n model = Airport\n fields = AIRPORT_FIELDS\n slug_field = 'iata'\n slug_url_kwarg = 'airport_iata'\n\n#------------------------------------------------------------\n\ndef all_persons(request):\n '''List all persons.'''\n all_persons = Person.objects.order_by('family', 'personal')\n context = {'title' : 'All Persons',\n 'all_persons' : all_persons}\n return render(request, 'workshops/all_persons.html', context)\n\ndef person_details(request, person_id):\n '''List details of a particular person.'''\n person = Person.objects.get(id=person_id)\n context = {'title' : 'Person {0}'.format(person),\n 'person' : person}\n return render(request, 'workshops/person.html', context)\n\n#------------------------------------------------------------\n\ndef all_events(request):\n '''List 
all events.'''\n\n all_events = Event.objects.order_by('slug')\n items, page = _get_pagination_items(request)\n\n # Show everything.\n if items == 'all':\n events = all_events\n\n # Show selected items.\n else:\n events_paginator = Paginator(all_events, items)\n\n # Select the events.\n try:\n events = events_paginator.page(page)\n\n # If page is not an integer, deliver first page.\n except PageNotAnInteger:\n events = events_paginator.page(1)\n\n # If page is out of range, deliver last page of results.\n except EmptyPage:\n events = events_paginator.page(events_paginator.num_pages)\n\n context = {'title' : 'All Events',\n 'all_events' : events}\n\n return render(request, 'workshops/all_events.html', context)\n\ndef event_details(request, event_slug):\n '''List details of a particular event.'''\n event = Event.objects.get(slug=event_slug)\n context = {'title' : 'Event {0}'.format(event),\n 'event' : event}\n return render(request, 'workshops/event.html', context)\n\n#------------------------------------------------------------\n\nTASK_FIELDS = ['event', 'person', 'role']\n\ndef all_tasks(request):\n '''List all tasks.'''\n all_tasks = Task.objects.order_by('event', 'person', 'role')\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Tasks',\n 'all_tasks' : all_tasks,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_tasks.html', context)\n\ndef task_details(request, event_slug, person_id, role_name):\n '''List details of a particular task.'''\n task = Task.objects.get(event__slug=event_slug, person__id=person_id, role__name=role_name)\n context = {'title' : 'Task {0}'.format(task),\n 'task' : task}\n return render(request, 'workshops/task.html', context)\n\nclass TaskCreate(CreateView):\n model = Task\n fields = TASK_FIELDS\n\nclass TaskUpdate(UpdateView):\n model = Task\n fields = TASK_FIELDS\n pk_url_kwarg = 'task_id'\n\n def get_object(self):\n \"\"\"\n Returns the object the view is displaying.\n \"\"\"\n\n event_slug = self.kwargs.get('event_slug', None)\n person_id = self.kwargs.get('person_id', None)\n role_name = self.kwargs.get('role_name', None)\n\n return get_object_or_404(Task, event__slug=event_slug, person__id=person_id, role__name=role_name)\n\n#------------------------------------------------------------\n\nCOHORT_FIELDS = ['name', 'start', 'active', 'venue', 'qualifies']\n\ndef all_cohorts(request):\n '''List all cohorts.'''\n all_cohorts = Cohort.objects.order_by('start')\n user_can_add = request.user.has_perm('edit')\n context = {'title' : 'All Cohorts',\n 'all_cohorts' : all_cohorts,\n 'user_can_add' : user_can_add}\n return render(request, 'workshops/all_cohorts.html', context)\n\ndef cohort_details(request, cohort_name):\n '''List details of a particular cohort.'''\n cohort = Cohort.objects.get(name=cohort_name)\n trainees = Trainee.objects.filter(cohort_id=cohort.id)\n context = {'title' : 'Cohort {0}'.format(cohort),\n 'cohort' : cohort,\n 'trainees' : trainees}\n return render(request, 'workshops/cohort.html', context)\n\nclass CohortCreate(CreateView):\n model = Cohort\n fields = COHORT_FIELDS\n\nclass CohortUpdate(UpdateView):\n model = Cohort\n fields = COHORT_FIELDS\n slug_field = 'name'\n slug_url_kwarg = 'cohort_name'\n\n#------------------------------------------------------------\n\ndef match(request):\n persons = None\n\n if request.method == 'POST':\n form = InstructorMatchForm(request.POST)\n if form.is_valid():\n\n # Filter by skills.\n persons = Person.objects.filter(airport__isnull=False)\n skills = []\n 
for s in Skill.objects.all():\n if form.cleaned_data[s.name]:\n skills.append(s)\n persons = persons.have_skills(skills)\n\n # Sort by location.\n loc = (float(form.cleaned_data['latitude']),\n float(form.cleaned_data['longitude']))\n persons = [(earth_distance(loc, (p.airport.latitude, p.airport.longitude)), p)\n for p in persons]\n persons.sort()\n persons = [x[1] for x in persons[:10]]\n\n else:\n pass # FIXME: error message\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = InstructorMatchForm()\n\n return render(request, 'workshops/match.html', {'form': form, 'persons' : persons})\n\n#------------------------------------------------------------\n\ndef _export_badges():\n '''Collect badge data as YAML.'''\n result = {}\n for badge in Badge.objects.all():\n persons = Person.objects.filter(award__badge_id=badge.id)\n result[badge.name] = [{\"user\" : p.slug, \"name\" : p.fullname()} for p in persons]\n return result\n\ndef _export_instructors():\n '''Collect instructor airport locations as YAML.'''\n # Exclude airports with no instructors, and add the number of instructors per airport\n airports = Airport.objects.exclude(person=None).annotate(num_persons=Count('person'))\n return [{'airport' : str(a.fullname),\n 'latlng' : '{0},{1}'.format(a.latitude, a.longitude),\n 'count' : a.num_persons}\n for a in airports]\n\ndef export(request, name):\n '''Export data as YAML for inclusion in main web site.'''\n data = None\n if name == 'badges':\n title, data = 'Badges', _export_badges()\n elif name == 'instructors':\n title, data = 'Instructor Locations', _export_instructors()\n else:\n title, data = 'Error', None # FIXME - need an error message\n context = {'title' : title,\n 'data' : data}\n return render(request, 'workshops/export.html', context)\n\n#------------------------------------------------------------\n\ndef _get_pagination_items(request):\n '''Determine how much pagination to do.'''\n\n items = request.GET.get('items_per_page', ITEMS_PER_PAGE)\n\n if items != 'all':\n try:\n items = int(items)\n except ValueError:\n items = ITEMS_PER_PAGE\n\n page = request.GET.get('page')\n\n return items, page\n", "path": "workshops/views.py"}]} | 3,332 | 820 |
gh_patches_debug_8898 | rasdani/github-patches | git_diff | speechbrain__speechbrain-71 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Learning rate printing
Now it will only print 0.00. Maybe we should print it with scientific notation.
--- END ISSUE ---
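To make the suggestion concrete, here is a small self-contained illustration (plain Python string formatting, not SpeechBrain-specific code; the learning-rate value is a hypothetical example) of why a fixed two-decimal format hides small learning rates while scientific notation preserves them:

```python
lr = 0.0003  # hypothetical small learning rate

print(f"{lr:.2f}")  # prints "0.00"     -- the fixed-point format loses the value
print(f"{lr:.2e}")  # prints "3.00e-04" -- scientific notation keeps it readable
```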
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `speechbrain/utils/train_logger.py`
Content:
```
1 import logging
2 from speechbrain.utils.edit_distance import wer_summary
3
4 logger = logging.getLogger(__name__)
5
6
7 class TrainLogger:
8 """Abstract class defining an interface for training loggers."""
9
10 def log_stats(
11 self,
12 stats_meta,
13 train_stats=None,
14 valid_stats=None,
15 test_stats=None,
16 verbose=False,
17 ):
18 """Log the stats for one epoch.
19
20 Arguments
21 ---------
22 stats_meta : dict of str:scalar pairs
23 Meta information about the stats (e.g. epoch, learning-rate, etc.)
24 train_stats : dict of str:list pairs
25 Each loss type is represented with a str : list pair including
26 all the values for the training pass.
27 valid_stats : dict of str:list pairs
28 Each loss type is represented with a str : list pair including
29 all the values for the validation pass.
30 test_stats : dict of str:list pairs
31 Each loss type is represented with a str : list pair including
32 all the values for the test pass.
33 verbose : bool
34 Whether to also put logging information to the standard logger.
35 """
36 raise NotImplementedError
37
38
39 class FileTrainLogger(TrainLogger):
40 """Text logger of training information
41
42 Arguments
43 ---------
44 save_file : str
45 The file to use for logging train information.
46 summary_fns : dict of str:function pairs
47 Each summary function should take a list produced as output
48 from a training/validation pass and summarize it to a single scalar.
49 """
50
51 def __init__(self, save_file, summary_fns):
52 self.save_file = save_file
53 self.summary_fns = summary_fns
54
55 def _item_to_string(self, key, value, dataset=None):
56 """Convert one item to string, handling floats"""
57 if isinstance(value, float):
58 value = f"{value:.2f}"
59 if dataset is not None:
60 key = f"{dataset} {key}"
61 return f"{key}: {value}"
62
63 def _stats_to_string(self, stats, dataset=None):
64 """Convert all stats to a single string summary"""
65 return ", ".join(
66 [self._item_to_string(k, v, dataset) for k, v in stats.items()]
67 )
68
69 def log_stats(
70 self,
71 stats_meta,
72 train_stats=None,
73 valid_stats=None,
74 test_stats=None,
75 verbose=True,
76 ):
77 """See TrainLogger.log_stats()"""
78 string_summary = self._stats_to_string(stats_meta)
79 for dataset, stats in [
80 ("train", train_stats),
81 ("valid", valid_stats),
82 ("test", test_stats),
83 ]:
84 if stats is None:
85 continue
86 summary = {}
87 for stat, value_list in stats.items():
88 summary[stat] = self.summary_fns[stat](value_list)
89 string_summary += " - " + self._stats_to_string(summary, dataset)
90
91 with open(self.save_file, "a") as fout:
92 print(string_summary, file=fout)
93 if verbose:
94 logger.info(string_summary)
95
96
97 class TensorboardLogger(TrainLogger):
98 """Logs training information in the format required by Tensorboard.
99
100 Arguments
101 ---------
102 save_dir : str
103 A directory for storing all the relevant logs
104
105 Raises
106 ------
107 ImportError if Tensorboard is not installed.
108 """
109
110 def __init__(self, save_dir):
111 self.save_dir = save_dir
112
113 # Raises ImportError if TensorBoard is not installed
114 from torch.utils.tensorboard import SummaryWriter
115
116 self.writer = SummaryWriter(self.save_dir)
117 self.global_step = {"train": {}, "valid": {}, "meta": 0}
118
119 def log_stats(
120 self,
121 stats_meta,
122 train_stats=None,
123 valid_stats=None,
124 test_stats=None,
125 verbose=False,
126 ):
127 """See TrainLogger.log_stats()"""
128 self.global_step["meta"] += 1
129 for name, value in stats_meta.items():
130 self.writer.add_scalar(name, value, self.global_step["meta"])
131
132 for dataset, stats in [
133 ("train", train_stats),
134 ("valid", valid_stats),
135 ("test", test_stats),
136 ]:
137 if stats is None:
138 continue
139 for stat, value_list in stats.items():
140 if stat not in self.global_step[dataset]:
141 self.global_step[dataset][stat] = 0
142 tag = f"{stat}/{dataset}"
143 for value in value_list:
144 new_global_step = self.global_step[dataset][stat] + 1
145 self.writer.add_scalar(tag, value, new_global_step)
146 self.global_step[dataset][stat] = new_global_step
147
148
149 def summarize_average(stat_list):
150 return float(sum(stat_list) / len(stat_list))
151
152
153 def summarize_error_rate(stat_list):
154 summary = wer_summary(stat_list)
155 return summary["WER"]
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/speechbrain/utils/train_logger.py b/speechbrain/utils/train_logger.py
--- a/speechbrain/utils/train_logger.py
+++ b/speechbrain/utils/train_logger.py
@@ -54,8 +54,10 @@
def _item_to_string(self, key, value, dataset=None):
"""Convert one item to string, handling floats"""
- if isinstance(value, float):
+ if isinstance(value, float) and 0.01 < value < 100.0:
value = f"{value:.2f}"
+ elif isinstance(value, float):
+ value = f"{value:.2e}"
if dataset is not None:
key = f"{dataset} {key}"
return f"{key}: {value}"
| {"golden_diff": "diff --git a/speechbrain/utils/train_logger.py b/speechbrain/utils/train_logger.py\n--- a/speechbrain/utils/train_logger.py\n+++ b/speechbrain/utils/train_logger.py\n@@ -54,8 +54,10 @@\n \n def _item_to_string(self, key, value, dataset=None):\n \"\"\"Convert one item to string, handling floats\"\"\"\n- if isinstance(value, float):\n+ if isinstance(value, float) and 0.01 < value < 100.0:\n value = f\"{value:.2f}\"\n+ elif isinstance(value, float):\n+ value = f\"{value:.2e}\"\n if dataset is not None:\n key = f\"{dataset} {key}\"\n return f\"{key}: {value}\"\n", "issue": "Learning rate printing\nNow it will only print 0.00. Maybe we should print it with scientific notation.\n", "before_files": [{"content": "import logging\nfrom speechbrain.utils.edit_distance import wer_summary\n\nlogger = logging.getLogger(__name__)\n\n\nclass TrainLogger:\n \"\"\"Abstract class defining an interface for training loggers.\"\"\"\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"Log the stats for one epoch.\n\n Arguments\n ---------\n stats_meta : dict of str:scalar pairs\n Meta information about the stats (e.g. epoch, learning-rate, etc.)\n train_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the training pass.\n valid_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the validation pass.\n test_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the test pass.\n verbose : bool\n Whether to also put logging information to the standard logger.\n \"\"\"\n raise NotImplementedError\n\n\nclass FileTrainLogger(TrainLogger):\n \"\"\"Text logger of training information\n\n Arguments\n ---------\n save_file : str\n The file to use for logging train information.\n summary_fns : dict of str:function pairs\n Each summary function should take a list produced as output\n from a training/validation pass and summarize it to a single scalar.\n \"\"\"\n\n def __init__(self, save_file, summary_fns):\n self.save_file = save_file\n self.summary_fns = summary_fns\n\n def _item_to_string(self, key, value, dataset=None):\n \"\"\"Convert one item to string, handling floats\"\"\"\n if isinstance(value, float):\n value = f\"{value:.2f}\"\n if dataset is not None:\n key = f\"{dataset} {key}\"\n return f\"{key}: {value}\"\n\n def _stats_to_string(self, stats, dataset=None):\n \"\"\"Convert all stats to a single string summary\"\"\"\n return \", \".join(\n [self._item_to_string(k, v, dataset) for k, v in stats.items()]\n )\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=True,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n string_summary = self._stats_to_string(stats_meta)\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n summary = {}\n for stat, value_list in stats.items():\n summary[stat] = self.summary_fns[stat](value_list)\n string_summary += \" - \" + self._stats_to_string(summary, dataset)\n\n with open(self.save_file, \"a\") as fout:\n print(string_summary, file=fout)\n if verbose:\n logger.info(string_summary)\n\n\nclass TensorboardLogger(TrainLogger):\n \"\"\"Logs training information in the format required by Tensorboard.\n\n Arguments\n ---------\n save_dir : str\n A 
directory for storing all the relevant logs\n\n Raises\n ------\n ImportError if Tensorboard is not installed.\n \"\"\"\n\n def __init__(self, save_dir):\n self.save_dir = save_dir\n\n # Raises ImportError if TensorBoard is not installed\n from torch.utils.tensorboard import SummaryWriter\n\n self.writer = SummaryWriter(self.save_dir)\n self.global_step = {\"train\": {}, \"valid\": {}, \"meta\": 0}\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n self.global_step[\"meta\"] += 1\n for name, value in stats_meta.items():\n self.writer.add_scalar(name, value, self.global_step[\"meta\"])\n\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n for stat, value_list in stats.items():\n if stat not in self.global_step[dataset]:\n self.global_step[dataset][stat] = 0\n tag = f\"{stat}/{dataset}\"\n for value in value_list:\n new_global_step = self.global_step[dataset][stat] + 1\n self.writer.add_scalar(tag, value, new_global_step)\n self.global_step[dataset][stat] = new_global_step\n\n\ndef summarize_average(stat_list):\n return float(sum(stat_list) / len(stat_list))\n\n\ndef summarize_error_rate(stat_list):\n summary = wer_summary(stat_list)\n return summary[\"WER\"]\n", "path": "speechbrain/utils/train_logger.py"}], "after_files": [{"content": "import logging\nfrom speechbrain.utils.edit_distance import wer_summary\n\nlogger = logging.getLogger(__name__)\n\n\nclass TrainLogger:\n \"\"\"Abstract class defining an interface for training loggers.\"\"\"\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"Log the stats for one epoch.\n\n Arguments\n ---------\n stats_meta : dict of str:scalar pairs\n Meta information about the stats (e.g. 
epoch, learning-rate, etc.)\n train_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the training pass.\n valid_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the validation pass.\n test_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the test pass.\n verbose : bool\n Whether to also put logging information to the standard logger.\n \"\"\"\n raise NotImplementedError\n\n\nclass FileTrainLogger(TrainLogger):\n \"\"\"Text logger of training information\n\n Arguments\n ---------\n save_file : str\n The file to use for logging train information.\n summary_fns : dict of str:function pairs\n Each summary function should take a list produced as output\n from a training/validation pass and summarize it to a single scalar.\n \"\"\"\n\n def __init__(self, save_file, summary_fns):\n self.save_file = save_file\n self.summary_fns = summary_fns\n\n def _item_to_string(self, key, value, dataset=None):\n \"\"\"Convert one item to string, handling floats\"\"\"\n if isinstance(value, float) and 0.01 < value < 100.0:\n value = f\"{value:.2f}\"\n elif isinstance(value, float):\n value = f\"{value:.2e}\"\n if dataset is not None:\n key = f\"{dataset} {key}\"\n return f\"{key}: {value}\"\n\n def _stats_to_string(self, stats, dataset=None):\n \"\"\"Convert all stats to a single string summary\"\"\"\n return \", \".join(\n [self._item_to_string(k, v, dataset) for k, v in stats.items()]\n )\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=True,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n string_summary = self._stats_to_string(stats_meta)\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n summary = {}\n for stat, value_list in stats.items():\n summary[stat] = self.summary_fns[stat](value_list)\n string_summary += \" - \" + self._stats_to_string(summary, dataset)\n\n with open(self.save_file, \"a\") as fout:\n print(string_summary, file=fout)\n if verbose:\n logger.info(string_summary)\n\n\nclass TensorboardLogger(TrainLogger):\n \"\"\"Logs training information in the format required by Tensorboard.\n\n Arguments\n ---------\n save_dir : str\n A directory for storing all the relevant logs\n\n Raises\n ------\n ImportError if Tensorboard is not installed.\n \"\"\"\n\n def __init__(self, save_dir):\n self.save_dir = save_dir\n\n # Raises ImportError if TensorBoard is not installed\n from torch.utils.tensorboard import SummaryWriter\n\n self.writer = SummaryWriter(self.save_dir)\n self.global_step = {\"train\": {}, \"valid\": {}, \"meta\": 0}\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n self.global_step[\"meta\"] += 1\n for name, value in stats_meta.items():\n self.writer.add_scalar(name, value, self.global_step[\"meta\"])\n\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n for stat, value_list in stats.items():\n if stat not in self.global_step[dataset]:\n self.global_step[dataset][stat] = 0\n tag = f\"{stat}/{dataset}\"\n for value in value_list:\n new_global_step = self.global_step[dataset][stat] + 1\n self.writer.add_scalar(tag, value, 
new_global_step)\n self.global_step[dataset][stat] = new_global_step\n\n\ndef summarize_average(stat_list):\n return float(sum(stat_list) / len(stat_list))\n\n\ndef summarize_error_rate(stat_list):\n summary = wer_summary(stat_list)\n return summary[\"WER\"]\n", "path": "speechbrain/utils/train_logger.py"}]} | 1,687 | 167 |
gh_patches_debug_5285 | rasdani/github-patches | git_diff | freedomofpress__securedrop-7140 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release SecureDrop 2.8.0
This is a tracking issue for the release of SecureDrop 2.8.0
Tentatively scheduled as follows:
**Pre-release announcement:** 2024-03-05
**Release date:** 2024-03-12
**Release manager:** @zenmonkeykstop
**Deputy release manager:** @legoktm
**Localization manager:** @cfm
**Communications manager:** @eloquence
_SecureDrop maintainers and testers:_ As you QA 2.8.0, please report back your testing results as comments on this ticket. File GitHub issues for any problems found, tag them "QA: Release".
Test debian packages will be posted on https://apt-test.freedom.press signed with [the test key](https://gist.githubusercontent.com/conorsch/ec4008b111bc3142fca522693f3cce7e/raw/2968621e8ad92db4505a31fcc5776422d7d26729/apt-test%2520apt%2520pubkey).
# [QA Matrix for 2.8.0](https://docs.google.com/spreadsheets/d/1hcSrgbid03so0tQz3zfwvMaWJ8x7OOZsCfEz1I_PjAE/edit#gid=96348658)
# [Test Plan for 2.8.0](https://github.com/freedomofpress/securedrop/wiki/2.8.0-Test-Plan)
# [Tails-only test plan for 2.8.0-rc2](https://github.com/freedomofpress/securedrop/issues/7121#issuecomment-1988954749)
(complete if you've already tested 2.8.0-rc1, there are no server changes in rc2)
# Prepare release candidate (2.8.0~rc1)
- [ ] Link to latest version of Tails, including release candidates, to test against during QA
- [ ] Tails 5
- [ ] Tails 6
- [x] Prepare 2.8.0~rc1 release changelog
- [x] Branch off release/2.8.0 from develop
- [x] Prepare 2.8.0
- [ ] Build debs, preserving build log, and put up `2.8.0~rc1` on test apt server
- [ ] Commit build log.
# Prepare release candidate (2.8.0~rc2)
- [ ] Link to latest version of Tails, including release candidates, to test against during QA
- [x] Tails 5
- [x] Tails 6
- [x] Prepare 2.8.0~rc2 release changelog
- [x] Branch off release/2.8.0 from develop
- [x] Prepare 2.8.0-rc2
- [ ] ~Build debs, preserving build log, and put up `2.8.0~rc1` on test apt server~ skipped, as changes are Tails-only.
- [ ] ~Commit build log.~
After each test, please update the QA matrix and post details for Basic Server Testing, Application Acceptance Testing and release-specific testing below in comments to this ticket.
# Final release
- [ ] ~Ensure builder in release branch is updated and/or update builder image~ (no longer in use)
- [x] Push signed tag
- [x] Pre-Flight: Test updater logic in Tails (apt-qa tracks the `release` branch in the LFS repo)
- [x] Build final Debian packages(and preserve build log)
- [x] Commit package build log to https://github.com/freedomofpress/build-logs
- [x] Pre-Flight: Test that install and upgrade from 2.7.0 to 2.8.0 works w/ prod repo debs (apt-qa.freedom.press polls the `release` branch in the LFS repo for the debs)
- [ ] Flip apt QA server to prod status (merge to `main` in the LFS repo)
- [ ] Merge Docs branch changes to ``main`` and verify new docs build in securedrop-docs repo
- [ ] Prepare release messaging
# Post release
- [ ] Create GitHub release object
- [ ] Once release object is created, update versions in `securedrop-docs` and Wagtail
- [ ] Verify new docs show up on https://docs.securedrop.org
- [ ] Publish announcements
- [ ] Merge changelog back to `develop`
- [ ] Update roadmap wiki page: https://github.com/freedomofpress/securedrop/wiki/Development-Roadmap
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/version.py`
Content:
```
1 __version__ = "2.8.0~rc1"
2
```
Path: `securedrop/setup.py`
Content:
```
1 import setuptools
2
3 long_description = "The SecureDrop whistleblower platform."
4
5 setuptools.setup(
6 name="securedrop-app-code",
7 version="2.8.0~rc1",
8 author="Freedom of the Press Foundation",
9 author_email="[email protected]",
10 description="SecureDrop Server",
11 long_description=long_description,
12 long_description_content_type="text/markdown",
13 license="AGPLv3+",
14 python_requires=">=3.8",
15 url="https://github.com/freedomofpress/securedrop",
16 classifiers=[
17 "Development Status :: 5 - Stable",
18 "Programming Language :: Python :: 3",
19 "Topic :: Software Development :: Libraries :: Python Modules",
20 "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
21 "Intended Audience :: Developers",
22 "Operating System :: OS Independent",
23 ],
24 )
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/setup.py b/securedrop/setup.py
--- a/securedrop/setup.py
+++ b/securedrop/setup.py
@@ -4,7 +4,7 @@
setuptools.setup(
name="securedrop-app-code",
- version="2.8.0~rc1",
+ version="2.9.0~rc1",
author="Freedom of the Press Foundation",
author_email="[email protected]",
description="SecureDrop Server",
diff --git a/securedrop/version.py b/securedrop/version.py
--- a/securedrop/version.py
+++ b/securedrop/version.py
@@ -1 +1 @@
-__version__ = "2.8.0~rc1"
+__version__ = "2.9.0~rc1"
| {"golden_diff": "diff --git a/securedrop/setup.py b/securedrop/setup.py\n--- a/securedrop/setup.py\n+++ b/securedrop/setup.py\n@@ -4,7 +4,7 @@\n \n setuptools.setup(\n name=\"securedrop-app-code\",\n- version=\"2.8.0~rc1\",\n+ version=\"2.9.0~rc1\",\n author=\"Freedom of the Press Foundation\",\n author_email=\"[email protected]\",\n description=\"SecureDrop Server\",\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = \"2.8.0~rc1\"\n+__version__ = \"2.9.0~rc1\"\n", "issue": "Release SecureDrop 2.8.0\nThis is a tracking issue for the release of SecureDrop 2.8.0\r\n\r\nTentatively scheduled as follows:\r\n\r\n**Pre-release announcement:** 2023-03-05\r\n**Release date:** 2024-03-12\r\n\r\n**Release manager:** @zenmonkeykstop \r\n**Deputy release manager:** @legoktm \r\n**Localization manager:** @cfm\r\n**Communications manager:** @eloquence\r\n\r\n_SecureDrop maintainers and testers:_ As you QA 2.8.0, please report back your testing results as comments on this ticket. File GitHub issues for any problems found, tag them \"QA: Release\".\r\n\r\nTest debian packages will be posted on https://apt-test.freedom.press signed with [the test key](https://gist.githubusercontent.com/conorsch/ec4008b111bc3142fca522693f3cce7e/raw/2968621e8ad92db4505a31fcc5776422d7d26729/apt-test%2520apt%2520pubkey).\r\n\r\n# [QA Matrix for 2.8.0](https://docs.google.com/spreadsheets/d/1hcSrgbid03so0tQz3zfwvMaWJ8x7OOZsCfEz1I_PjAE/edit#gid=96348658)\r\n# [Test Plan for 2.8.0](https://github.com/freedomofpress/securedrop/wiki/2.8.0-Test-Plan)\r\n# [Tails-only test plan for 2.8.0-rc2](https://github.com/freedomofpress/securedrop/issues/7121#issuecomment-1988954749)\r\n(complete if you've already tested 2.8.0-rc1, there are no server changes in rc2)\r\n\r\n# Prepare release candidate (2.8.0~rc1)\r\n- [ ] Link to latest version of Tails, including release candidates, to test against during QA\r\n - [ ] Tails 5 \r\n - [ ] Tails 6 \r\n- [x] Prepare 2.8.0~rc1 release changelog\r\n- [x] Branch off release/2.8.0 from develop\r\n- [x] Prepare 2.8.0\r\n- [ ] Build debs, preserving build log, and put up `2.8.0~rc1` on test apt server\r\n- [ ] Commit build log.\r\n\r\n# Prepare release candidate (2.8.0~rc2)\r\n- [ ] Link to latest version of Tails, including release candidates, to test against during QA\r\n - [x] Tails 5 \r\n - [x] Tails 6 \r\n- [x] Prepare 2.8.0~rc2 release changelog\r\n- [x] Branch off release/2.8.0 from develop\r\n- [x] Prepare 2.8.0-rc2\r\n- [ ] ~Build debs, preserving build log, and put up `2.8.0~rc1` on test apt server~ skipped, as changes are Tails-only.\r\n- [ ] ~Commit build log.~\r\n\r\n\r\nAfter each test, please update the QA matrix and post details for Basic Server Testing, Application Acceptance Testing and release-specific testing below in comments to this ticket.\r\n\r\n# Final release\r\n- [ ] ~Ensure builder in release branch is updated and/or update builder image~ (no longer in use)\r\n- [x] Push signed tag \r\n- [x] Pre-Flight: Test updater logic in Tails (apt-qa tracks the `release` branch in the LFS repo)\r\n- [x] Build final Debian packages(and preserve build log)\r\n- [x] Commit package build log to https://github.com/freedomofpress/build-logs\r\n- [x] Pre-Flight: Test that install and upgrade from 2.7.0 to 2.8.0 works w/ prod repo debs (apt-qa.freedom.press polls the `release` branch in the LFS repo for the debs)\r\n- [ ] Flip apt QA server to prod status (merge to `main` in the LFS repo)\r\n- [ ] 
Merge Docs branch changes to ``main`` and verify new docs build in securedrop-docs repo\r\n- [ ] Prepare release messaging\r\n\r\n# Post release\r\n- [ ] Create GitHub release object \r\n- [ ] Once release object is created, update versions in `securedrop-docs` and Wagtail\r\n- [ ] Verify new docs show up on https://docs.securedrop.org\r\n- [ ] Publish announcements\r\n- [ ] Merge changelog back to `develop`\r\n- [ ] Update roadmap wiki page: https://github.com/freedomofpress/securedrop/wiki/Development-Roadmap\n", "before_files": [{"content": "__version__ = \"2.8.0~rc1\"\n", "path": "securedrop/version.py"}, {"content": "import setuptools\n\nlong_description = \"The SecureDrop whistleblower platform.\"\n\nsetuptools.setup(\n name=\"securedrop-app-code\",\n version=\"2.8.0~rc1\",\n author=\"Freedom of the Press Foundation\",\n author_email=\"[email protected]\",\n description=\"SecureDrop Server\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"AGPLv3+\",\n python_requires=\">=3.8\",\n url=\"https://github.com/freedomofpress/securedrop\",\n classifiers=[\n \"Development Status :: 5 - Stable\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n ],\n)\n", "path": "securedrop/setup.py"}], "after_files": [{"content": "__version__ = \"2.9.0~rc1\"\n", "path": "securedrop/version.py"}, {"content": "import setuptools\n\nlong_description = \"The SecureDrop whistleblower platform.\"\n\nsetuptools.setup(\n name=\"securedrop-app-code\",\n version=\"2.9.0~rc1\",\n author=\"Freedom of the Press Foundation\",\n author_email=\"[email protected]\",\n description=\"SecureDrop Server\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"AGPLv3+\",\n python_requires=\">=3.8\",\n url=\"https://github.com/freedomofpress/securedrop\",\n classifiers=[\n \"Development Status :: 5 - Stable\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n ],\n)\n", "path": "securedrop/setup.py"}]} | 1,591 | 175 |
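The golden diff above is a pure version bump that has to change the same string in two files. A hypothetical helper (not part of the SecureDrop repository) that keeps `securedrop/version.py` and `securedrop/setup.py` in step might look like this:

```python
# Hypothetical release helper: rewrite the version string in both files at once.
import re
from pathlib import Path

def bump_version(new_version, root="."):
    targets = {
        Path(root) / "securedrop/version.py": r'__version__ = "(?P<v>[^"]+)"',
        Path(root) / "securedrop/setup.py": r'version="(?P<v>[^"]+)"',
    }
    for path, pattern in targets.items():
        text = path.read_text()
        updated, count = re.subn(
            pattern,
            lambda m: m.group(0).replace(m.group("v"), new_version),
            text,
        )
        if count != 1:
            raise RuntimeError(f"expected one version string in {path}, found {count}")
        path.write_text(updated)

# bump_version("2.9.0~rc1")
```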
gh_patches_debug_29242 | rasdani/github-patches | git_diff | larq__larq-34 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tf.sign(0) = 0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xquant/quantizers.py`
Content:
```
1 import tensorflow as tf
2 from xquant import utils
3
4
5 @utils.register_keras_custom_object
6 @tf.custom_gradient
7 def ste_sign(x):
8 r"""
9 Sign binarization function.
10 \\[q(x) = \mathrm{Sign}(x)\\]
11
12 The gradient is estimated using the Straight-Through Estimator.
13 \\[\frac{\partial q(x)}{\partial x} = x\\]
14
15 # Arguments
16 x: Input tensor.
17
18 # Returns
19 Binarized tensor.
20
21 # References
22 - [Binarized Neural Networks: Training Deep Neural Networks with Weights and
23 Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)
24 """
25
26 def grad(dy):
27 return dy
28
29 return tf.sign(x), grad
30
31
32 @utils.register_keras_custom_object
33 @tf.custom_gradient
34 def approx_sign(x):
35 r"""
36 Sign binarization function.
37 \\[q(x) = \mathrm{Sign}(x)\\]
38
39 The gradient is estimated using the ApproxSign method.
40 \\[\frac{\partial q(x)}{\partial x} = (2 - 2 \left|x\right|))\\]
41
42 # Arguments
43 x: Input tensor.
44
45 # Returns
46 Binarized tensor.
47
48 # References
49 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
50 Representational Capability and Advanced
51 Training Algorithm](http://arxiv.org/abs/1808.00278)
52 """
53
54 def grad(dy):
55 return (1 - tf.abs(x)) * 2 * dy
56
57 return tf.sign(x), grad
58
59
60 def serialize(initializer):
61 return tf.keras.utils.serialize_keras_object(initializer)
62
63
64 def deserialize(name, custom_objects=None):
65 return tf.keras.utils.deserialize_keras_object(
66 name,
67 module_objects=globals(),
68 custom_objects=custom_objects,
69 printable_module_name="quantization function",
70 )
71
72
73 def get(identifier):
74 if identifier is None:
75 return None
76 if isinstance(identifier, str):
77 return deserialize(str(identifier))
78 if callable(identifier):
79 return identifier
80 raise ValueError(
81 "Could not interpret quantization function identifier:", identifier
82 )
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xquant/quantizers.py b/xquant/quantizers.py
--- a/xquant/quantizers.py
+++ b/xquant/quantizers.py
@@ -2,12 +2,22 @@
from xquant import utils
+def sign(x):
+ """A sign function that will never be zero"""
+ return tf.sign(tf.sign(x) + 1e-10)
+
+
@utils.register_keras_custom_object
@tf.custom_gradient
def ste_sign(x):
r"""
Sign binarization function.
- \\[q(x) = \mathrm{Sign}(x)\\]
+ \\[
+ q(x) = \begin{cases}
+ -1 & x < 0 \\\
+ 1 & x \geq 0
+ \end{cases}
+ \\]
The gradient is estimated using the Straight-Through Estimator.
\\[\frac{\partial q(x)}{\partial x} = x\\]
@@ -26,7 +36,7 @@
def grad(dy):
return dy
- return tf.sign(x), grad
+ return sign(x), grad
@utils.register_keras_custom_object
@@ -34,7 +44,12 @@
def approx_sign(x):
r"""
Sign binarization function.
- \\[q(x) = \mathrm{Sign}(x)\\]
+ \\[
+ q(x) = \begin{cases}
+ -1 & x < 0 \\\
+ 1 & x \geq 0
+ \end{cases}
+ \\]
The gradient is estimated using the ApproxSign method.
\\[\frac{\partial q(x)}{\partial x} = (2 - 2 \left|x\right|))\\]
@@ -54,7 +69,7 @@
def grad(dy):
return (1 - tf.abs(x)) * 2 * dy
- return tf.sign(x), grad
+ return sign(x), grad
def serialize(initializer):
| {"golden_diff": "diff --git a/xquant/quantizers.py b/xquant/quantizers.py\n--- a/xquant/quantizers.py\n+++ b/xquant/quantizers.py\n@@ -2,12 +2,22 @@\n from xquant import utils\n \n \n+def sign(x):\n+ \"\"\"A sign function that will never be zero\"\"\"\n+ return tf.sign(tf.sign(x) + 1e-10)\n+\n+\n @utils.register_keras_custom_object\n @tf.custom_gradient\n def ste_sign(x):\n r\"\"\"\n Sign binarization function.\n- \\\\[q(x) = \\mathrm{Sign}(x)\\\\]\n+ \\\\[\n+ q(x) = \\begin{cases}\n+ -1 & x < 0 \\\\\\\n+ 1 & x \\geq 0\n+ \\end{cases}\n+ \\\\]\n \n The gradient is estimated using the Straight-Through Estimator.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = x\\\\]\n@@ -26,7 +36,7 @@\n def grad(dy):\n return dy\n \n- return tf.sign(x), grad\n+ return sign(x), grad\n \n \n @utils.register_keras_custom_object\n@@ -34,7 +44,12 @@\n def approx_sign(x):\n r\"\"\"\n Sign binarization function.\n- \\\\[q(x) = \\mathrm{Sign}(x)\\\\]\n+ \\\\[\n+ q(x) = \\begin{cases}\n+ -1 & x < 0 \\\\\\\n+ 1 & x \\geq 0\n+ \\end{cases}\n+ \\\\]\n \n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = (2 - 2 \\left|x\\right|))\\\\]\n@@ -54,7 +69,7 @@\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n \n- return tf.sign(x), grad\n+ return sign(x), grad\n \n \n def serialize(initializer):\n", "issue": "tf.sign(0) = 0\n\n", "before_files": [{"content": "import tensorflow as tf\nfrom xquant import utils\n\n\[email protected]_keras_custom_object\[email protected]_gradient\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[q(x) = \\mathrm{Sign}(x)\\\\]\n\n The gradient is estimated using the Straight-Through Estimator.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = x\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n def grad(dy):\n return dy\n\n return tf.sign(x), grad\n\n\[email protected]_keras_custom_object\[email protected]_gradient\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[q(x) = \\mathrm{Sign}(x)\\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = (2 - 2 \\left|x\\right|))\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return tf.sign(x), grad\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n \"Could not interpret quantization function identifier:\", identifier\n )\n", "path": "xquant/quantizers.py"}], "after_files": [{"content": "import tensorflow as tf\nfrom xquant import utils\n\n\ndef sign(x):\n \"\"\"A sign function that will never be zero\"\"\"\n return tf.sign(tf.sign(x) + 1e-10)\n\n\[email protected]_keras_custom_object\[email 
protected]_gradient\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = x\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n def grad(dy):\n return dy\n\n return sign(x), grad\n\n\[email protected]_keras_custom_object\[email protected]_gradient\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = (2 - 2 \\left|x\\right|))\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return sign(x), grad\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n \"Could not interpret quantization function identifier:\", identifier\n )\n", "path": "xquant/quantizers.py"}]} | 929 | 454 |
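The fix in the record above works because `tf.sign` maps exactly-zero inputs to 0, so the patched quantizer nudges the inner result before taking the sign again. A quick check, assuming TensorFlow 2.x with eager execution:

```python
import tensorflow as tf

def sign(x):
    # tf.sign(0.) == 0., so shift the inner sign slightly before re-signing
    return tf.sign(tf.sign(x) + 1e-10)

print(tf.sign(0.0).numpy())                          # 0.0  <- the reported problem
print(sign(tf.constant(0.0)).numpy())                # 1.0  <- zeros now binarize to +1
print(sign(tf.constant([-3.0, 0.0, 2.5])).numpy())   # [-1.  1.  1.]
```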
gh_patches_debug_13023 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4833 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redirect full path to default version
I'd like to be able to create versionless links to the documentation, for use in error messages, code comments, etc. For example, a message like `see https://click.palletsprojects.com/windows for more information`. I don't want to use URLs with versions because I would have to remember to modify all instances of it before releasing a new version.
Currently, only the root path redirects to the default version, other paths raise a 404. Instead, the path should be preserved and appended to the default version path on redirect.
```
Works:
https://click.palletsprojects.com/ -> https://click.palletsprojects.com/en/7.x/
Doesn't work, 404:
https://click.palletsprojects.com/windows -> https://click.palletsprojects.com/en/7.x/windows
```
I do not want to use the "latest" or "stable" versions because I would like the URLs that people land on and share to contain the actual version.
I already do this with the transitional redirects I set up from `click.pocoo.org` to `click.palletsprojects.com`. A similar approach could probably be used to extend RTD's default redirect.
```nginx
location ~ ^/dev(.*)$ {
return 301 https://click.palletsprojects.com/en/master$1;
}
location ~ ^/(\d)(.*)$ {
return 301 https://click.palletsprojects.com/en/$1.x$2;
}
location ~ ^/latest(.*)$ {
return 301 https://click.palletsprojects.com/en/7.x$1;
}
location / {
return 301 https://click.palletsprojects.com/en/7.x$request_uri;
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/core/views/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 Core views, including the main homepage,
5
6 documentation and header rendering, and server errors.
7 """
8
9 from __future__ import absolute_import
10 from __future__ import division
11 import os
12 import logging
13
14 from django.conf import settings
15 from django.http import HttpResponseRedirect, Http404, JsonResponse
16 from django.shortcuts import render, get_object_or_404, redirect
17 from django.views.decorators.csrf import csrf_exempt
18 from django.views.generic import TemplateView
19
20 from readthedocs.builds.models import Version
21 from readthedocs.core.utils import broadcast
22 from readthedocs.projects.models import Project, ImportedFile
23 from readthedocs.projects.tasks import remove_dir
24 from readthedocs.redirects.utils import get_redirect_response
25
26 log = logging.getLogger(__name__)
27
28
29 class NoProjectException(Exception):
30 pass
31
32
33 class HomepageView(TemplateView):
34
35 template_name = 'homepage.html'
36
37 def get_context_data(self, **kwargs):
38 """Add latest builds and featured projects."""
39 context = super(HomepageView, self).get_context_data(**kwargs)
40 context['featured_list'] = Project.objects.filter(featured=True)
41 context['projects_count'] = Project.objects.count()
42 return context
43
44
45 class SupportView(TemplateView):
46 template_name = 'support.html'
47
48 def get_context_data(self, **kwargs):
49 context = super(SupportView, self).get_context_data(**kwargs)
50 support_email = getattr(settings, 'SUPPORT_EMAIL', None)
51 if not support_email:
52 support_email = 'support@{domain}'.format(
53 domain=getattr(
54 settings,
55 'PRODUCTION_DOMAIN',
56 'readthedocs.org',
57 ),
58 )
59
60 context['support_email'] = support_email
61 return context
62
63
64 def random_page(request, project_slug=None): # pylint: disable=unused-argument
65 imported_file = ImportedFile.objects.order_by('?')
66 if project_slug:
67 imported_file = imported_file.filter(project__slug=project_slug)
68 imported_file = imported_file.first()
69 if imported_file is None:
70 raise Http404
71 url = imported_file.get_absolute_url()
72 return HttpResponseRedirect(url)
73
74
75 @csrf_exempt
76 def wipe_version(request, project_slug, version_slug):
77 version = get_object_or_404(
78 Version,
79 project__slug=project_slug,
80 slug=version_slug,
81 )
82 # We need to check by ``for_admin_user`` here to allow members of the
83 # ``Admin`` team (which doesn't own the project) under the corporate site.
84 if version.project not in Project.objects.for_admin_user(user=request.user):
85 raise Http404('You must own this project to wipe it.')
86
87 if request.method == 'POST':
88 del_dirs = [
89 os.path.join(version.project.doc_path, 'checkouts', version.slug),
90 os.path.join(version.project.doc_path, 'envs', version.slug),
91 os.path.join(version.project.doc_path, 'conda', version.slug),
92 ]
93 for del_dir in del_dirs:
94 broadcast(type='build', task=remove_dir, args=[del_dir])
95 return redirect('project_version_list', project_slug)
96 return render(
97 request,
98 'wipe_version.html',
99 {'version': version, 'project': version.project},
100 )
101
102
103 def server_error_500(request, template_name='500.html'):
104 """A simple 500 handler so we get media."""
105 r = render(request, template_name)
106 r.status_code = 500
107 return r
108
109
110 def server_error_404(request, exception=None, template_name='404.html'): # pylint: disable=unused-argument # noqa
111 """
112 A simple 404 handler so we get media.
113
114 .. note::
115
116 Marking exception as optional to make /404/ testing page to work.
117 """
118 response = get_redirect_response(request, path=request.get_full_path())
119 if response:
120 return response
121 r = render(request, template_name)
122 r.status_code = 404
123 return r
124
125
126 def do_not_track(request):
127 dnt_header = request.META.get('HTTP_DNT')
128
129 # https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation
130 return JsonResponse({ # pylint: disable=redundant-content-type-for-json-response
131 'policy': 'https://docs.readthedocs.io/en/latest/privacy-policy.html',
132 'same-party': [
133 'readthedocs.org',
134 'readthedocs.com',
135 'readthedocs.io', # .org Documentation Sites
136 'readthedocs-hosted.com', # .com Documentation Sites
137 ],
138 'tracking': 'N' if dnt_header == '1' else 'T',
139 }, content_type='application/tracking-status+json')
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/core/views/__init__.py b/readthedocs/core/views/__init__.py
--- a/readthedocs/core/views/__init__.py
+++ b/readthedocs/core/views/__init__.py
@@ -116,8 +116,16 @@
Marking exception as optional to make /404/ testing page to work.
"""
response = get_redirect_response(request, path=request.get_full_path())
+
if response:
- return response
+ if response.url == request.build_absolute_uri():
+ # check that we do have a response and avoid infinite redirect
+ log.warning(
+ 'Infinite Redirect: FROM URL is the same than TO URL. url=%s',
+ response.url,
+ )
+ else:
+ return response
r = render(request, template_name)
r.status_code = 404
return r
| {"golden_diff": "diff --git a/readthedocs/core/views/__init__.py b/readthedocs/core/views/__init__.py\n--- a/readthedocs/core/views/__init__.py\n+++ b/readthedocs/core/views/__init__.py\n@@ -116,8 +116,16 @@\n Marking exception as optional to make /404/ testing page to work.\n \"\"\"\n response = get_redirect_response(request, path=request.get_full_path())\n+\n if response:\n- return response\n+ if response.url == request.build_absolute_uri():\n+ # check that we do have a response and avoid infinite redirect\n+ log.warning(\n+ 'Infinite Redirect: FROM URL is the same than TO URL. url=%s',\n+ response.url,\n+ )\n+ else:\n+ return response\n r = render(request, template_name)\n r.status_code = 404\n return r\n", "issue": "Redirect full path to default version\nI'd like to be able to create versionless links to the documentation, for use in error messages, code comments, etc. For example, a message like `see https://click.palletsprojects.com/windows for more information`. I don't want to use URLs with versions because I would have to remember to modify all instances of it before releasing a new version.\r\n\r\nCurrently, only the root path redirects to the default version, other paths raise a 404. Instead, the path should be preserved and appended to the default version path on redirect.\r\n\r\n```\r\nWorks:\r\nhttps://click.palletsprojects.com/ -> https://click.palletsprojects.com/en/7.x/\r\n\r\nDoesn't work, 404:\r\nhttps://click.palletsprojects.com/windows -> https://click.palletsprojects.com/en/7.x/windows\r\n```\r\n\r\nI do not want to use the \"latest\" or \"stable\" versions because I would like the URLs that people land on and share to contain the actual version.\r\n\r\nI already do this with the transitional redirects I set up from `click.pocoo.org` to `click.palletsprojects.com`. 
A similar approach could probably be used to extend RTD's default redirect.\r\n\r\n```nginx\r\nlocation ~ ^/dev(.*)$ {\r\n return 301 https://click.palletsprojects.com/en/master$1;\r\n}\r\n\r\nlocation ~ ^/(\\d)(.*)$ {\r\n return 301 https://click.palletsprojects.com/en/$1.x$2;\r\n}\r\n\r\nlocation ~ ^/latest(.*)$ {\r\n return 301 https://click.palletsprojects.com/en/7.x$1;\r\n}\r\n\r\nlocation / {\r\n return 301 https://click.palletsprojects.com/en/7.x$request_uri;\r\n}\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCore views, including the main homepage,\n\ndocumentation and header rendering, and server errors.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport os\nimport logging\n\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect, Http404, JsonResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import TemplateView\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.core.utils import broadcast\nfrom readthedocs.projects.models import Project, ImportedFile\nfrom readthedocs.projects.tasks import remove_dir\nfrom readthedocs.redirects.utils import get_redirect_response\n\nlog = logging.getLogger(__name__)\n\n\nclass NoProjectException(Exception):\n pass\n\n\nclass HomepageView(TemplateView):\n\n template_name = 'homepage.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Add latest builds and featured projects.\"\"\"\n context = super(HomepageView, self).get_context_data(**kwargs)\n context['featured_list'] = Project.objects.filter(featured=True)\n context['projects_count'] = Project.objects.count()\n return context\n\n\nclass SupportView(TemplateView):\n template_name = 'support.html'\n\n def get_context_data(self, **kwargs):\n context = super(SupportView, self).get_context_data(**kwargs)\n support_email = getattr(settings, 'SUPPORT_EMAIL', None)\n if not support_email:\n support_email = 'support@{domain}'.format(\n domain=getattr(\n settings,\n 'PRODUCTION_DOMAIN',\n 'readthedocs.org',\n ),\n )\n\n context['support_email'] = support_email\n return context\n\n\ndef random_page(request, project_slug=None): # pylint: disable=unused-argument\n imported_file = ImportedFile.objects.order_by('?')\n if project_slug:\n imported_file = imported_file.filter(project__slug=project_slug)\n imported_file = imported_file.first()\n if imported_file is None:\n raise Http404\n url = imported_file.get_absolute_url()\n return HttpResponseRedirect(url)\n\n\n@csrf_exempt\ndef wipe_version(request, project_slug, version_slug):\n version = get_object_or_404(\n Version,\n project__slug=project_slug,\n slug=version_slug,\n )\n # We need to check by ``for_admin_user`` here to allow members of the\n # ``Admin`` team (which doesn't own the project) under the corporate site.\n if version.project not in Project.objects.for_admin_user(user=request.user):\n raise Http404('You must own this project to wipe it.')\n\n if request.method == 'POST':\n del_dirs = [\n os.path.join(version.project.doc_path, 'checkouts', version.slug),\n os.path.join(version.project.doc_path, 'envs', version.slug),\n os.path.join(version.project.doc_path, 'conda', version.slug),\n ]\n for del_dir in del_dirs:\n broadcast(type='build', task=remove_dir, args=[del_dir])\n return redirect('project_version_list', project_slug)\n return render(\n request,\n 'wipe_version.html',\n {'version': version, 'project': version.project},\n 
)\n\n\ndef server_error_500(request, template_name='500.html'):\n \"\"\"A simple 500 handler so we get media.\"\"\"\n r = render(request, template_name)\n r.status_code = 500\n return r\n\n\ndef server_error_404(request, exception=None, template_name='404.html'): # pylint: disable=unused-argument # noqa\n \"\"\"\n A simple 404 handler so we get media.\n\n .. note::\n\n Marking exception as optional to make /404/ testing page to work.\n \"\"\"\n response = get_redirect_response(request, path=request.get_full_path())\n if response:\n return response\n r = render(request, template_name)\n r.status_code = 404\n return r\n\n\ndef do_not_track(request):\n dnt_header = request.META.get('HTTP_DNT')\n\n # https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation\n return JsonResponse({ # pylint: disable=redundant-content-type-for-json-response\n 'policy': 'https://docs.readthedocs.io/en/latest/privacy-policy.html',\n 'same-party': [\n 'readthedocs.org',\n 'readthedocs.com',\n 'readthedocs.io', # .org Documentation Sites\n 'readthedocs-hosted.com', # .com Documentation Sites\n ],\n 'tracking': 'N' if dnt_header == '1' else 'T',\n }, content_type='application/tracking-status+json')\n", "path": "readthedocs/core/views/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCore views, including the main homepage,\n\ndocumentation and header rendering, and server errors.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport os\nimport logging\n\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect, Http404, JsonResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import TemplateView\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.core.utils import broadcast\nfrom readthedocs.projects.models import Project, ImportedFile\nfrom readthedocs.projects.tasks import remove_dir\nfrom readthedocs.redirects.utils import get_redirect_response\n\nlog = logging.getLogger(__name__)\n\n\nclass NoProjectException(Exception):\n pass\n\n\nclass HomepageView(TemplateView):\n\n template_name = 'homepage.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Add latest builds and featured projects.\"\"\"\n context = super(HomepageView, self).get_context_data(**kwargs)\n context['featured_list'] = Project.objects.filter(featured=True)\n context['projects_count'] = Project.objects.count()\n return context\n\n\nclass SupportView(TemplateView):\n template_name = 'support.html'\n\n def get_context_data(self, **kwargs):\n context = super(SupportView, self).get_context_data(**kwargs)\n support_email = getattr(settings, 'SUPPORT_EMAIL', None)\n if not support_email:\n support_email = 'support@{domain}'.format(\n domain=getattr(\n settings,\n 'PRODUCTION_DOMAIN',\n 'readthedocs.org',\n ),\n )\n\n context['support_email'] = support_email\n return context\n\n\ndef random_page(request, project_slug=None): # pylint: disable=unused-argument\n imported_file = ImportedFile.objects.order_by('?')\n if project_slug:\n imported_file = imported_file.filter(project__slug=project_slug)\n imported_file = imported_file.first()\n if imported_file is None:\n raise Http404\n url = imported_file.get_absolute_url()\n return HttpResponseRedirect(url)\n\n\n@csrf_exempt\ndef wipe_version(request, project_slug, version_slug):\n version = get_object_or_404(\n Version,\n project__slug=project_slug,\n slug=version_slug,\n )\n # We need to 
check by ``for_admin_user`` here to allow members of the\n # ``Admin`` team (which doesn't own the project) under the corporate site.\n if version.project not in Project.objects.for_admin_user(user=request.user):\n raise Http404('You must own this project to wipe it.')\n\n if request.method == 'POST':\n del_dirs = [\n os.path.join(version.project.doc_path, 'checkouts', version.slug),\n os.path.join(version.project.doc_path, 'envs', version.slug),\n os.path.join(version.project.doc_path, 'conda', version.slug),\n ]\n for del_dir in del_dirs:\n broadcast(type='build', task=remove_dir, args=[del_dir])\n return redirect('project_version_list', project_slug)\n return render(\n request,\n 'wipe_version.html',\n {'version': version, 'project': version.project},\n )\n\n\ndef server_error_500(request, template_name='500.html'):\n \"\"\"A simple 500 handler so we get media.\"\"\"\n r = render(request, template_name)\n r.status_code = 500\n return r\n\n\ndef server_error_404(request, exception=None, template_name='404.html'): # pylint: disable=unused-argument # noqa\n \"\"\"\n A simple 404 handler so we get media.\n\n .. note::\n\n Marking exception as optional to make /404/ testing page to work.\n \"\"\"\n response = get_redirect_response(request, path=request.get_full_path())\n\n if response:\n if response.url == request.build_absolute_uri():\n # check that we do have a response and avoid infinite redirect\n log.warning(\n 'Infinite Redirect: FROM URL is the same than TO URL. url=%s',\n response.url,\n )\n else:\n return response\n r = render(request, template_name)\n r.status_code = 404\n return r\n\n\ndef do_not_track(request):\n dnt_header = request.META.get('HTTP_DNT')\n\n # https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation\n return JsonResponse({ # pylint: disable=redundant-content-type-for-json-response\n 'policy': 'https://docs.readthedocs.io/en/latest/privacy-policy.html',\n 'same-party': [\n 'readthedocs.org',\n 'readthedocs.com',\n 'readthedocs.io', # .org Documentation Sites\n 'readthedocs-hosted.com', # .com Documentation Sites\n ],\n 'tracking': 'N' if dnt_header == '1' else 'T',\n }, content_type='application/tracking-status+json')\n", "path": "readthedocs/core/views/__init__.py"}]} | 2,004 | 198 |
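The nginx rules quoted in the issue above amount to a small path-rewriting table plus a catch-all that keeps the full request path under the default version. A rough Python rendering of that idea (the `/en/7.x` prefix and version patterns come from the issue, not from Read the Docs internals):

```python
import re

DEFAULT_PREFIX = "/en/7.x"

RULES = [
    (re.compile(r"^/dev(?P<rest>.*)$"), "/en/master{rest}"),
    (re.compile(r"^/(?P<major>\d)(?P<rest>.*)$"), "/en/{major}.x{rest}"),
    (re.compile(r"^/latest(?P<rest>.*)$"), DEFAULT_PREFIX + "{rest}"),
]

def redirect_target(path):
    for pattern, template in RULES:
        match = pattern.match(path)
        if match:
            return template.format(**match.groupdict())
    # Catch-all: preserve the whole path on the default version,
    # i.e. /windows -> /en/7.x/windows as requested in the issue.
    return DEFAULT_PREFIX + path

print(redirect_target("/windows"))    # /en/7.x/windows
print(redirect_target("/dev/api/"))   # /en/master/api/
print(redirect_target("/7/options"))  # /en/7.x/options
```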
gh_patches_debug_38792 | rasdani/github-patches | git_diff | openmc-dev__openmc-1732 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature: Easier indexing through ResultsList to find depletion step index
If a user wanted to perform a restart simulation not necessarily at the end point in time, there isn't a super simple way to do it from the API. Currently you would have to manually search through the `Result` instances and compare their time data one at a time.
This isn't a monumental task, and I think we could easily provide a method that would allow users to find the index corresponding to a specific point in time. Supporting similar `time_units` like we do for the `Operator` (days, seconds, MWd/kgHM) would also be super nice.
Looking at #1708, this would also be useful when exporting `material.xml` files either during the depletion run or after the fact. Either way, the user will need a way to link a depletion step to a point in calendar time for restarting or other analysis.
Other things to consider would be some tolerance on the search criteria. If I ask for the step where burnup was 12 MWd/kgHM, (for example), but the closest point is 12.1 MWd/kgHM, should that step be returned? Or error out?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openmc/deplete/results_list.py`
Content:
```
1 import h5py
2 import numpy as np
3
4 from .results import Results, VERSION_RESULTS
5 from openmc.checkvalue import check_filetype_version, check_value
6
7
8 __all__ = ["ResultsList"]
9
10
11 class ResultsList(list):
12 """A list of openmc.deplete.Results objects
13
14 It is recommended to use :meth:`from_hdf5` over
15 direct creation.
16 """
17
18 @classmethod
19 def from_hdf5(cls, filename):
20 """Load in depletion results from a previous file
21
22 Parameters
23 ----------
24 filename : str
25 Path to depletion result file
26
27 Returns
28 -------
29 new : ResultsList
30 New instance of depletion results
31 """
32 with h5py.File(str(filename), "r") as fh:
33 check_filetype_version(fh, 'depletion results', VERSION_RESULTS[0])
34 new = cls()
35
36 # Get number of results stored
37 n = fh["number"][...].shape[0]
38
39 for i in range(n):
40 new.append(Results.from_hdf5(fh, i))
41 return new
42
43 def get_atoms(self, mat, nuc, nuc_units="atoms", time_units="s"):
44 """Get number of nuclides over time from a single material
45
46 .. note::
47 Initial values for some isotopes that do not appear in
48 initial concentrations may be non-zero, depending on the
49 value of :class:`openmc.deplete.Operator` ``dilute_initial``.
50 The :class:`openmc.deplete.Operator` adds isotopes according
51 to this setting, which can be set to zero.
52
53 Parameters
54 ----------
55 mat : str
56 Material name to evaluate
57 nuc : str
58 Nuclide name to evaluate
59 nuc_units : {"atoms", "atom/b-cm", "atom/cm3"}, optional
60 Units for the returned concentration. Default is ``"atoms"``
61
62 .. versionadded:: 0.12
63 time_units : {"s", "min", "h", "d"}, optional
64 Units for the returned time array. Default is ``"s"`` to
65 return the value in seconds.
66
67 .. versionadded:: 0.12
68
69 Returns
70 -------
71 times : numpy.ndarray
72 Array of times in units of ``time_units``
73 concentrations : numpy.ndarray
74 Concentration of specified nuclide in units of ``nuc_units``
75
76 """
77 check_value("time_units", time_units, {"s", "d", "min", "h"})
78 check_value("nuc_units", nuc_units,
79 {"atoms", "atom/b-cm", "atom/cm3"})
80
81 times = np.empty_like(self, dtype=float)
82 concentrations = np.empty_like(self, dtype=float)
83
84 # Evaluate value in each region
85 for i, result in enumerate(self):
86 times[i] = result.time[0]
87 concentrations[i] = result[0, mat, nuc]
88
89 # Unit conversions
90 if time_units == "d":
91 times /= (60 * 60 * 24)
92 elif time_units == "h":
93 times /= (60 * 60)
94 elif time_units == "min":
95 times /= 60
96
97 if nuc_units != "atoms":
98 # Divide by volume to get density
99 concentrations /= self[0].volume[mat]
100 if nuc_units == "atom/b-cm":
101 # 1 barn = 1e-24 cm^2
102 concentrations *= 1e-24
103
104 return times, concentrations
105
106 def get_reaction_rate(self, mat, nuc, rx):
107 """Get reaction rate in a single material/nuclide over time
108
109 .. note::
110
111 Initial values for some isotopes that do not appear in
112 initial concentrations may be non-zero, depending on the
113 value of :class:`openmc.deplete.Operator` ``dilute_initial``
114 The :class:`openmc.deplete.Operator` adds isotopes according
115 to this setting, which can be set to zero.
116
117 Parameters
118 ----------
119 mat : str
120 Material name to evaluate
121 nuc : str
122 Nuclide name to evaluate
123 rx : str
124 Reaction rate to evaluate
125
126 Returns
127 -------
128 times : numpy.ndarray
129 Array of times in [s]
130 rates : numpy.ndarray
131 Array of reaction rates
132
133 """
134 times = np.empty_like(self, dtype=float)
135 rates = np.empty_like(self, dtype=float)
136
137 # Evaluate value in each region
138 for i, result in enumerate(self):
139 times[i] = result.time[0]
140 rates[i] = result.rates[0].get(mat, nuc, rx) * result[0, mat, nuc]
141
142 return times, rates
143
144 def get_eigenvalue(self):
145 """Evaluates the eigenvalue from a results list.
146
147 Returns
148 -------
149 times : numpy.ndarray
150 Array of times in [s]
151 eigenvalues : numpy.ndarray
152 k-eigenvalue at each time. Column 0
153 contains the eigenvalue, while column
154 1 contains the associated uncertainty
155
156 """
157 times = np.empty_like(self, dtype=float)
158 eigenvalues = np.empty((len(self), 2), dtype=float)
159
160 # Get time/eigenvalue at each point
161 for i, result in enumerate(self):
162 times[i] = result.time[0]
163 eigenvalues[i] = result.k[0]
164
165 return times, eigenvalues
166
167 def get_depletion_time(self):
168 """Return an array of the average time to deplete a material
169
170 .. note::
171
172 Will have one fewer row than number of other methods,
173 like :meth:`get_eigenvalues`, because no depletion
174 is performed at the final transport stage
175
176 Returns
177 -------
178 times : numpy.ndarray
179 Vector of average time to deplete a single material
180 across all processes and materials.
181
182 """
183 times = np.empty(len(self) - 1)
184 # Need special logic because the predictor
185 # writes EOS values for step i as BOS values
186 # for step i+1
187 # The first proc_time may be zero
188 if self[0].proc_time > 0.0:
189 items = self[:-1]
190 else:
191 items = self[1:]
192 for ix, res in enumerate(items):
193 times[ix] = res.proc_time
194 return times
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openmc/deplete/results_list.py b/openmc/deplete/results_list.py
--- a/openmc/deplete/results_list.py
+++ b/openmc/deplete/results_list.py
@@ -1,8 +1,12 @@
+import numbers
+import bisect
+import math
+
import h5py
import numpy as np
from .results import Results, VERSION_RESULTS
-from openmc.checkvalue import check_filetype_version, check_value
+from openmc.checkvalue import check_filetype_version, check_value, check_type
__all__ = ["ResultsList"]
@@ -192,3 +196,103 @@
for ix, res in enumerate(items):
times[ix] = res.proc_time
return times
+
+ def get_times(self, time_units="d") -> np.ndarray:
+ """Return the points in time that define the depletion schedule
+
+
+ .. versionadded:: 0.12.1
+
+ Parameters
+ ----------
+ time_units : {"s", "d", "h", "min"}, optional
+ Return the vector in these units. Default is to
+ convert to days
+
+ Returns
+ -------
+ numpy.ndarray
+ 1-D vector of time points
+
+ """
+ check_type("time_units", time_units, str)
+
+ times = np.fromiter(
+ (r.time[0] for r in self),
+ dtype=self[0].time.dtype,
+ count=len(self),
+ )
+
+ if time_units == "d":
+ times /= (60 * 60 * 24)
+ elif time_units == "h":
+ times /= (60 * 60)
+ elif time_units == "min":
+ times /= 60
+ elif time_units != "s":
+ raise ValueError(
+ 'Unable to set "time_units" to {} since it is not '
+ 'in ("s", "d", "min", "h")'.format(time_units)
+ )
+ return times
+
+ def get_step_where(
+ self, time, time_units="d", atol=1e-6, rtol=1e-3
+ ) -> int:
+ """Return the index closest to a given point in time
+
+ In the event ``time`` lies exactly between two points, the
+ lower index will be returned. It is possible that the index
+ will be at most one past the point in time requested, but only
+ according to tolerances requested.
+
+ Passing ``atol=math.inf`` and ``rtol=math.inf`` will return
+ the closest index to the requested point.
+
+
+ .. versionadded:: 0.12.1
+
+ Parameters
+ ----------
+ time : float
+ Desired point in time
+ time_units : {"s", "d", "min", "h"}, optional
+ Units on ``time``. Default: days
+ atol : float, optional
+ Absolute tolerance (in ``time_units``) if ``time`` is not
+ found.
+ rtol : float, optional
+ Relative tolerance if ``time`` is not found.
+
+ Returns
+ -------
+ int
+
+ """
+ check_type("time", time, numbers.Real)
+ check_type("atol", atol, numbers.Real)
+ check_type("rtol", rtol, numbers.Real)
+
+ times = self.get_times(time_units)
+
+ if times[0] < time < times[-1]:
+ ix = bisect.bisect_left(times, time)
+ if ix == times.size:
+ ix -= 1
+ # Bisection will place us either directly on the point
+ # or one-past the first value less than time
+ elif time - times[ix - 1] <= times[ix] - time:
+ ix -= 1
+ elif times[0] >= time:
+ ix = 0
+ elif time >= times[-1]:
+ ix = times.size - 1
+
+ if math.isclose(time, times[ix], rel_tol=rtol, abs_tol=atol):
+ return ix
+
+ raise ValueError(
+ "A value of {} {} was not found given absolute and "
+ "relative tolerances {} and {}.".format(
+ time, time_units, atol, rtol)
+ )
| {"golden_diff": "diff --git a/openmc/deplete/results_list.py b/openmc/deplete/results_list.py\n--- a/openmc/deplete/results_list.py\n+++ b/openmc/deplete/results_list.py\n@@ -1,8 +1,12 @@\n+import numbers\n+import bisect\n+import math\n+\n import h5py\n import numpy as np\n \n from .results import Results, VERSION_RESULTS\n-from openmc.checkvalue import check_filetype_version, check_value\n+from openmc.checkvalue import check_filetype_version, check_value, check_type\n \n \n __all__ = [\"ResultsList\"]\n@@ -192,3 +196,103 @@\n for ix, res in enumerate(items):\n times[ix] = res.proc_time\n return times\n+\n+ def get_times(self, time_units=\"d\") -> np.ndarray:\n+ \"\"\"Return the points in time that define the depletion schedule\n+\n+\n+ .. versionadded:: 0.12.1\n+\n+ Parameters\n+ ----------\n+ time_units : {\"s\", \"d\", \"h\", \"min\"}, optional\n+ Return the vector in these units. Default is to\n+ convert to days\n+\n+ Returns\n+ -------\n+ numpy.ndarray\n+ 1-D vector of time points\n+\n+ \"\"\"\n+ check_type(\"time_units\", time_units, str)\n+\n+ times = np.fromiter(\n+ (r.time[0] for r in self),\n+ dtype=self[0].time.dtype,\n+ count=len(self),\n+ )\n+\n+ if time_units == \"d\":\n+ times /= (60 * 60 * 24)\n+ elif time_units == \"h\":\n+ times /= (60 * 60)\n+ elif time_units == \"min\":\n+ times /= 60\n+ elif time_units != \"s\":\n+ raise ValueError(\n+ 'Unable to set \"time_units\" to {} since it is not '\n+ 'in (\"s\", \"d\", \"min\", \"h\")'.format(time_units)\n+ )\n+ return times\n+\n+ def get_step_where(\n+ self, time, time_units=\"d\", atol=1e-6, rtol=1e-3\n+ ) -> int:\n+ \"\"\"Return the index closest to a given point in time\n+\n+ In the event ``time`` lies exactly between two points, the\n+ lower index will be returned. It is possible that the index\n+ will be at most one past the point in time requested, but only\n+ according to tolerances requested.\n+\n+ Passing ``atol=math.inf`` and ``rtol=math.inf`` will return\n+ the closest index to the requested point.\n+\n+\n+ .. versionadded:: 0.12.1\n+\n+ Parameters\n+ ----------\n+ time : float\n+ Desired point in time\n+ time_units : {\"s\", \"d\", \"min\", \"h\"}, optional\n+ Units on ``time``. Default: days\n+ atol : float, optional\n+ Absolute tolerance (in ``time_units``) if ``time`` is not\n+ found.\n+ rtol : float, optional\n+ Relative tolerance if ``time`` is not found.\n+\n+ Returns\n+ -------\n+ int\n+\n+ \"\"\"\n+ check_type(\"time\", time, numbers.Real)\n+ check_type(\"atol\", atol, numbers.Real)\n+ check_type(\"rtol\", rtol, numbers.Real)\n+\n+ times = self.get_times(time_units)\n+\n+ if times[0] < time < times[-1]:\n+ ix = bisect.bisect_left(times, time)\n+ if ix == times.size:\n+ ix -= 1\n+ # Bisection will place us either directly on the point\n+ # or one-past the first value less than time\n+ elif time - times[ix - 1] <= times[ix] - time:\n+ ix -= 1\n+ elif times[0] >= time:\n+ ix = 0\n+ elif time >= times[-1]:\n+ ix = times.size - 1\n+\n+ if math.isclose(time, times[ix], rel_tol=rtol, abs_tol=atol):\n+ return ix\n+\n+ raise ValueError(\n+ \"A value of {} {} was not found given absolute and \"\n+ \"relative tolerances {} and {}.\".format(\n+ time, time_units, atol, rtol)\n+ )\n", "issue": "Feature: Easier indexing through ResultsList to find depletion step index\nIf a user wanted to perform a restart simulation not necessarily at the end point in time, there isn't a super simple way to do it from the API. 
Currently you would have to manually search through the `Result` instances and compare their time data one at a time. \r\n\r\nThis isn't a monumental task, and I think we could easily provide a method that would allow users to find the index corresponding to a specific point in time. Supporting similar `time_units` like we do for the `Operator` (days, seconds, MWd/kgHM) would also be super nice. \r\n\r\nLooking at #1708, this would also be useful when exporting `material.xml` files either during the depletion run or after the fact. Either way, the user will need a way to link a depletion step to a point in calendar time for restarting or other analysis.\r\n\r\nOther things to consider would be some tolerance on the search criteria. If I ask for the step where burnup was 12 MWd/kgHM, (for example), but the closest point is 12.1 MWd/kgHM, should that step be returned? Or error out? \n", "before_files": [{"content": "import h5py\nimport numpy as np\n\nfrom .results import Results, VERSION_RESULTS\nfrom openmc.checkvalue import check_filetype_version, check_value\n\n\n__all__ = [\"ResultsList\"]\n\n\nclass ResultsList(list):\n \"\"\"A list of openmc.deplete.Results objects\n\n It is recommended to use :meth:`from_hdf5` over\n direct creation.\n \"\"\"\n\n @classmethod\n def from_hdf5(cls, filename):\n \"\"\"Load in depletion results from a previous file\n\n Parameters\n ----------\n filename : str\n Path to depletion result file\n\n Returns\n -------\n new : ResultsList\n New instance of depletion results\n \"\"\"\n with h5py.File(str(filename), \"r\") as fh:\n check_filetype_version(fh, 'depletion results', VERSION_RESULTS[0])\n new = cls()\n\n # Get number of results stored\n n = fh[\"number\"][...].shape[0]\n\n for i in range(n):\n new.append(Results.from_hdf5(fh, i))\n return new\n\n def get_atoms(self, mat, nuc, nuc_units=\"atoms\", time_units=\"s\"):\n \"\"\"Get number of nuclides over time from a single material\n\n .. note::\n Initial values for some isotopes that do not appear in\n initial concentrations may be non-zero, depending on the\n value of :class:`openmc.deplete.Operator` ``dilute_initial``.\n The :class:`openmc.deplete.Operator` adds isotopes according\n to this setting, which can be set to zero.\n\n Parameters\n ----------\n mat : str\n Material name to evaluate\n nuc : str\n Nuclide name to evaluate\n nuc_units : {\"atoms\", \"atom/b-cm\", \"atom/cm3\"}, optional\n Units for the returned concentration. Default is ``\"atoms\"``\n\n .. versionadded:: 0.12\n time_units : {\"s\", \"min\", \"h\", \"d\"}, optional\n Units for the returned time array. Default is ``\"s\"`` to\n return the value in seconds.\n\n .. 
versionadded:: 0.12\n\n Returns\n -------\n times : numpy.ndarray\n Array of times in units of ``time_units``\n concentrations : numpy.ndarray\n Concentration of specified nuclide in units of ``nuc_units``\n\n \"\"\"\n check_value(\"time_units\", time_units, {\"s\", \"d\", \"min\", \"h\"})\n check_value(\"nuc_units\", nuc_units,\n {\"atoms\", \"atom/b-cm\", \"atom/cm3\"})\n\n times = np.empty_like(self, dtype=float)\n concentrations = np.empty_like(self, dtype=float)\n\n # Evaluate value in each region\n for i, result in enumerate(self):\n times[i] = result.time[0]\n concentrations[i] = result[0, mat, nuc]\n\n # Unit conversions\n if time_units == \"d\":\n times /= (60 * 60 * 24)\n elif time_units == \"h\":\n times /= (60 * 60)\n elif time_units == \"min\":\n times /= 60\n\n if nuc_units != \"atoms\":\n # Divide by volume to get density\n concentrations /= self[0].volume[mat]\n if nuc_units == \"atom/b-cm\":\n # 1 barn = 1e-24 cm^2\n concentrations *= 1e-24\n\n return times, concentrations\n\n def get_reaction_rate(self, mat, nuc, rx):\n \"\"\"Get reaction rate in a single material/nuclide over time\n\n .. note::\n\n Initial values for some isotopes that do not appear in\n initial concentrations may be non-zero, depending on the\n value of :class:`openmc.deplete.Operator` ``dilute_initial``\n The :class:`openmc.deplete.Operator` adds isotopes according\n to this setting, which can be set to zero.\n\n Parameters\n ----------\n mat : str\n Material name to evaluate\n nuc : str\n Nuclide name to evaluate\n rx : str\n Reaction rate to evaluate\n\n Returns\n -------\n times : numpy.ndarray\n Array of times in [s]\n rates : numpy.ndarray\n Array of reaction rates\n\n \"\"\"\n times = np.empty_like(self, dtype=float)\n rates = np.empty_like(self, dtype=float)\n\n # Evaluate value in each region\n for i, result in enumerate(self):\n times[i] = result.time[0]\n rates[i] = result.rates[0].get(mat, nuc, rx) * result[0, mat, nuc]\n\n return times, rates\n\n def get_eigenvalue(self):\n \"\"\"Evaluates the eigenvalue from a results list.\n\n Returns\n -------\n times : numpy.ndarray\n Array of times in [s]\n eigenvalues : numpy.ndarray\n k-eigenvalue at each time. Column 0\n contains the eigenvalue, while column\n 1 contains the associated uncertainty\n\n \"\"\"\n times = np.empty_like(self, dtype=float)\n eigenvalues = np.empty((len(self), 2), dtype=float)\n\n # Get time/eigenvalue at each point\n for i, result in enumerate(self):\n times[i] = result.time[0]\n eigenvalues[i] = result.k[0]\n\n return times, eigenvalues\n\n def get_depletion_time(self):\n \"\"\"Return an array of the average time to deplete a material\n\n .. 
note::\n\n Will have one fewer row than number of other methods,\n like :meth:`get_eigenvalues`, because no depletion\n is performed at the final transport stage\n\n Returns\n -------\n times : numpy.ndarray\n Vector of average time to deplete a single material\n across all processes and materials.\n\n \"\"\"\n times = np.empty(len(self) - 1)\n # Need special logic because the predictor\n # writes EOS values for step i as BOS values\n # for step i+1\n # The first proc_time may be zero\n if self[0].proc_time > 0.0:\n items = self[:-1]\n else:\n items = self[1:]\n for ix, res in enumerate(items):\n times[ix] = res.proc_time\n return times\n", "path": "openmc/deplete/results_list.py"}], "after_files": [{"content": "import numbers\nimport bisect\nimport math\n\nimport h5py\nimport numpy as np\n\nfrom .results import Results, VERSION_RESULTS\nfrom openmc.checkvalue import check_filetype_version, check_value, check_type\n\n\n__all__ = [\"ResultsList\"]\n\n\nclass ResultsList(list):\n \"\"\"A list of openmc.deplete.Results objects\n\n It is recommended to use :meth:`from_hdf5` over\n direct creation.\n \"\"\"\n\n @classmethod\n def from_hdf5(cls, filename):\n \"\"\"Load in depletion results from a previous file\n\n Parameters\n ----------\n filename : str\n Path to depletion result file\n\n Returns\n -------\n new : ResultsList\n New instance of depletion results\n \"\"\"\n with h5py.File(str(filename), \"r\") as fh:\n check_filetype_version(fh, 'depletion results', VERSION_RESULTS[0])\n new = cls()\n\n # Get number of results stored\n n = fh[\"number\"][...].shape[0]\n\n for i in range(n):\n new.append(Results.from_hdf5(fh, i))\n return new\n\n def get_atoms(self, mat, nuc, nuc_units=\"atoms\", time_units=\"s\"):\n \"\"\"Get number of nuclides over time from a single material\n\n .. note::\n Initial values for some isotopes that do not appear in\n initial concentrations may be non-zero, depending on the\n value of :class:`openmc.deplete.Operator` ``dilute_initial``.\n The :class:`openmc.deplete.Operator` adds isotopes according\n to this setting, which can be set to zero.\n\n Parameters\n ----------\n mat : str\n Material name to evaluate\n nuc : str\n Nuclide name to evaluate\n nuc_units : {\"atoms\", \"atom/b-cm\", \"atom/cm3\"}, optional\n Units for the returned concentration. Default is ``\"atoms\"``\n\n .. versionadded:: 0.12\n time_units : {\"s\", \"min\", \"h\", \"d\"}, optional\n Units for the returned time array. Default is ``\"s\"`` to\n return the value in seconds.\n\n .. 
versionadded:: 0.12\n\n Returns\n -------\n times : numpy.ndarray\n Array of times in units of ``time_units``\n concentrations : numpy.ndarray\n Concentration of specified nuclide in units of ``nuc_units``\n\n \"\"\"\n check_value(\"time_units\", time_units, {\"s\", \"d\", \"min\", \"h\"})\n check_value(\"nuc_units\", nuc_units,\n {\"atoms\", \"atom/b-cm\", \"atom/cm3\"})\n\n times = np.empty_like(self, dtype=float)\n concentrations = np.empty_like(self, dtype=float)\n\n # Evaluate value in each region\n for i, result in enumerate(self):\n times[i] = result.time[0]\n concentrations[i] = result[0, mat, nuc]\n\n # Unit conversions\n if time_units == \"d\":\n times /= (60 * 60 * 24)\n elif time_units == \"h\":\n times /= (60 * 60)\n elif time_units == \"min\":\n times /= 60\n\n if nuc_units != \"atoms\":\n # Divide by volume to get density\n concentrations /= self[0].volume[mat]\n if nuc_units == \"atom/b-cm\":\n # 1 barn = 1e-24 cm^2\n concentrations *= 1e-24\n\n return times, concentrations\n\n def get_reaction_rate(self, mat, nuc, rx):\n \"\"\"Get reaction rate in a single material/nuclide over time\n\n .. note::\n\n Initial values for some isotopes that do not appear in\n initial concentrations may be non-zero, depending on the\n value of :class:`openmc.deplete.Operator` ``dilute_initial``\n The :class:`openmc.deplete.Operator` adds isotopes according\n to this setting, which can be set to zero.\n\n Parameters\n ----------\n mat : str\n Material name to evaluate\n nuc : str\n Nuclide name to evaluate\n rx : str\n Reaction rate to evaluate\n\n Returns\n -------\n times : numpy.ndarray\n Array of times in [s]\n rates : numpy.ndarray\n Array of reaction rates\n\n \"\"\"\n times = np.empty_like(self, dtype=float)\n rates = np.empty_like(self, dtype=float)\n\n # Evaluate value in each region\n for i, result in enumerate(self):\n times[i] = result.time[0]\n rates[i] = result.rates[0].get(mat, nuc, rx) * result[0, mat, nuc]\n\n return times, rates\n\n def get_eigenvalue(self):\n \"\"\"Evaluates the eigenvalue from a results list.\n\n Returns\n -------\n times : numpy.ndarray\n Array of times in [s]\n eigenvalues : numpy.ndarray\n k-eigenvalue at each time. Column 0\n contains the eigenvalue, while column\n 1 contains the associated uncertainty\n\n \"\"\"\n times = np.empty_like(self, dtype=float)\n eigenvalues = np.empty((len(self), 2), dtype=float)\n\n # Get time/eigenvalue at each point\n for i, result in enumerate(self):\n times[i] = result.time[0]\n eigenvalues[i] = result.k[0]\n\n return times, eigenvalues\n\n def get_depletion_time(self):\n \"\"\"Return an array of the average time to deplete a material\n\n .. note::\n\n Will have one fewer row than number of other methods,\n like :meth:`get_eigenvalues`, because no depletion\n is performed at the final transport stage\n\n Returns\n -------\n times : numpy.ndarray\n Vector of average time to deplete a single material\n across all processes and materials.\n\n \"\"\"\n times = np.empty(len(self) - 1)\n # Need special logic because the predictor\n # writes EOS values for step i as BOS values\n # for step i+1\n # The first proc_time may be zero\n if self[0].proc_time > 0.0:\n items = self[:-1]\n else:\n items = self[1:]\n for ix, res in enumerate(items):\n times[ix] = res.proc_time\n return times\n\n def get_times(self, time_units=\"d\") -> np.ndarray:\n \"\"\"Return the points in time that define the depletion schedule\n\n\n .. 
versionadded:: 0.12.1\n\n Parameters\n ----------\n time_units : {\"s\", \"d\", \"h\", \"min\"}, optional\n Return the vector in these units. Default is to\n convert to days\n\n Returns\n -------\n numpy.ndarray\n 1-D vector of time points\n\n \"\"\"\n check_type(\"time_units\", time_units, str)\n\n times = np.fromiter(\n (r.time[0] for r in self),\n dtype=self[0].time.dtype,\n count=len(self),\n )\n\n if time_units == \"d\":\n times /= (60 * 60 * 24)\n elif time_units == \"h\":\n times /= (60 * 60)\n elif time_units == \"min\":\n times /= 60\n elif time_units != \"s\":\n raise ValueError(\n 'Unable to set \"time_units\" to {} since it is not '\n 'in (\"s\", \"d\", \"min\", \"h\")'.format(time_units)\n )\n return times\n\n def get_step_where(\n self, time, time_units=\"d\", atol=1e-6, rtol=1e-3\n ) -> int:\n \"\"\"Return the index closest to a given point in time\n\n In the event ``time`` lies exactly between two points, the\n lower index will be returned. It is possible that the index\n will be at most one past the point in time requested, but only\n according to tolerances requested.\n\n Passing ``atol=math.inf`` and ``rtol=math.inf`` will return\n the closest index to the requested point.\n\n\n .. versionadded:: 0.12.1\n\n Parameters\n ----------\n time : float\n Desired point in time\n time_units : {\"s\", \"d\", \"min\", \"h\"}, optional\n Units on ``time``. Default: days\n atol : float, optional\n Absolute tolerance (in ``time_units``) if ``time`` is not\n found.\n rtol : float, optional\n Relative tolerance if ``time`` is not found.\n\n Returns\n -------\n int\n\n \"\"\"\n check_type(\"time\", time, numbers.Real)\n check_type(\"atol\", atol, numbers.Real)\n check_type(\"rtol\", rtol, numbers.Real)\n\n times = self.get_times(time_units)\n\n if times[0] < time < times[-1]:\n ix = bisect.bisect_left(times, time)\n if ix == times.size:\n ix -= 1\n # Bisection will place us either directly on the point\n # or one-past the first value less than time\n elif time - times[ix - 1] <= times[ix] - time:\n ix -= 1\n elif times[0] >= time:\n ix = 0\n elif time >= times[-1]:\n ix = times.size - 1\n\n if math.isclose(time, times[ix], rel_tol=rtol, abs_tol=atol):\n return ix\n\n raise ValueError(\n \"A value of {} {} was not found given absolute and \"\n \"relative tolerances {} and {}.\".format(\n time, time_units, atol, rtol)\n )\n", "path": "openmc/deplete/results_list.py"}]} | 2,400 | 1,000 |
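For reference, a brief usage sketch of the `get_times` / `get_step_where` helpers shown in the `after_files` above — the results file name and the queried time are illustrative assumptions, not taken from the OpenMC repository:

```python
# Hypothetical usage sketch of the new ResultsList helpers shown above.
# "depletion_results.h5" is an assumed file name for illustration only.
from openmc.deplete.results_list import ResultsList

results = ResultsList.from_hdf5("depletion_results.h5")
times = results.get_times(time_units="d")           # 1-D vector of time points in days
ix = results.get_step_where(12.0, time_units="d")   # index of the step closest to day 12
# get_step_where raises ValueError if no step lies within the default atol/rtol tolerances.
print(times[ix])
```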
gh_patches_debug_18534 | rasdani/github-patches | git_diff | google__flax-1937 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incompatible variables for Tensorboard hparams are recast to strings but never returned
### Core Problem
Tensorboard hparams only supports a subset of Python and Numpy variable types ([see hparams docstrings](https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py)). The `flax.metrics.tensorboard.SummaryWriter` class's method `SummaryWriter.hparams()` should handle this behavior via the `flax.metrics.tensorboard._flatten_dict()` function, casting incompatible types to strings (which hparams supports). However, despite performing the casting operation, the `_flatten_dict` function does not append the recast variables to the dictionary it returns.
The result, for the below example, is that the "hidden_layers" parameters are silently excluded and do not appear in Tensorboard's hparams.
```Python
from flax.metrics import tensorboard
experiment_dir = "./Example"
network_hyperparameters = {
"hidden_layers_list": [12,12],
"hidden_layers_tuple": (12,12),
"dropout_rate": 1.0,
}
summary_writer = tensorboard.SummaryWriter(experiment_dir)
summary_writer.hparams(network_hyperparameters)
summary_writer.scalar('Training loss', 0.1, 1)
summary_writer.flush()
```
### Colab Example:
[Example notebook](https://colab.research.google.com/gist/tttc3/8dd7ef04c4222bc18fb03b043d370120/falx_tensorboard_issue_demo.ipynb)
### Proposed fix
Modify `_flatten_dict` to explicitly check if a dictionary value is one of those supported by Tensorboard's hparams API, as defined [here](https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py). If the value is not supported, cast it to a string and append it to the dictionary that `_flatten_dict` normally returns.
**Current _flatten_dict code**
```Python
def _flatten_dict(input_dict, parent_key='', sep='.'):
"""Flattens and simplifies dict such that it can be used by hparams.
Args:
input_dict: Input dict, e.g., from ConfigDict.
parent_key: String used in recursion.
sep: String used to separate parent and child keys.
Returns:
Flattened dict.
"""
items = []
for k, v in input_dict.items():
new_key = parent_key + sep + k if parent_key else k
# Take special care of things hparams cannot handle.
if v is None:
v = 'None'
elif isinstance(v, list):
v = str(v)
elif isinstance(v, tuple):
v = str(v)
elif isinstance(v, dict):
# Recursively flatten the dict.
items.extend(_flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
```
**Proposed _flatten_dict code modification**
```Python
def _flatten_dict(input_dict, parent_key='', sep='.'):
"""Flattens and simplifies dict such that it can be used by hparams.
Args:
input_dict: Input dict, e.g., from ConfigDict.
parent_key: String used in recursion.
sep: String used to separate parent and child keys.
Returns:
Flattened dict.
"""
items = []
for k, v in input_dict.items():
new_key = parent_key + sep + k if parent_key else k
# Valid types according to https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py
valid_types = (bool, int, float, str, np.bool_, np.integer, np.floating, np.character)
if isinstance(v, dict):
# Recursively flatten the dict.
items.extend(_flatten_dict(v, new_key, sep=sep).items())
continue
elif not isinstance(v, valid_types):
# Cast any incompatible values as strings such that they can be handled by hparams
v = str(v)
items.append((new_key, v))
return dict(items)
```
I am happy to submit a pull request with the modifications.
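
For illustration, assuming the proposed definition above (with `import numpy as np` in scope), the example hyperparameters from the first snippet would flatten as follows — a sanity-check sketch, not a test from the repository:

```Python
flattened = _flatten_dict(network_hyperparameters)
assert flattened == {
    "hidden_layers_list": "[12, 12]",   # cast to str instead of being dropped
    "hidden_layers_tuple": "(12, 12)",  # cast to str instead of being dropped
    "dropout_rate": 1.0,                # already an hparams-supported float
}
```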
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/metrics/tensorboard.py`
Content:
```
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Write Summaries from JAX for use with Tensorboard.
16 """
17
18 import os
19
20 # pylint: disable=g-import-not-at-top
21 import numpy as np
22
23 import tensorflow.compat.v2 as tf
24 from tensorboard.plugins.hparams import api as hparams_api
25
26
27 def _flatten_dict(input_dict, parent_key='', sep='.'):
28 """Flattens and simplifies dict such that it can be used by hparams.
29
30 Args:
31 input_dict: Input dict, e.g., from ConfigDict.
32 parent_key: String used in recursion.
33 sep: String used to separate parent and child keys.
34
35 Returns:
36 Flattened dict.
37 """
38 items = []
39 for k, v in input_dict.items():
40 new_key = parent_key + sep + k if parent_key else k
41
42 # Take special care of things hparams cannot handle.
43 if v is None:
44 v = 'None'
45 elif isinstance(v, list):
46 v = str(v)
47 elif isinstance(v, tuple):
48 v = str(v)
49 elif isinstance(v, dict):
50 # Recursively flatten the dict.
51 items.extend(_flatten_dict(v, new_key, sep=sep).items())
52 else:
53 items.append((new_key, v))
54 return dict(items)
55
56
57 class SummaryWriter(object):
58 """Saves data in event and summary protos for tensorboard."""
59
60 def __init__(self, log_dir):
61 """Create a new SummaryWriter.
62
63 Args:
64 log_dir: path to record tfevents files in.
65 """
66 log_dir = os.fspath(log_dir)
67
68 # If needed, create log_dir directory as well as missing parent directories.
69 if not tf.io.gfile.isdir(log_dir):
70 tf.io.gfile.makedirs(log_dir)
71
72 self._event_writer = tf.summary.create_file_writer(log_dir, 10, 120, None)
73 self._closed = False
74
75 def close(self):
76 """Close SummaryWriter. Final!"""
77 if not self._closed:
78 self._event_writer.close()
79 self._closed = True
80 del self._event_writer
81
82 def flush(self):
83 self._event_writer.flush()
84
85 def scalar(self, tag, value, step):
86 """Saves scalar value.
87
88 Args:
89 tag: str: label for this data
90 value: int/float: number to log
91 step: int: training step
92 """
93 value = float(np.array(value))
94 with self._event_writer.as_default():
95 tf.summary.scalar(name=tag, data=value, step=step)
96
97 def image(self, tag, image, step, max_outputs=3):
98 """Saves RGB image summary from np.ndarray [H,W], [H,W,1], or [H,W,3].
99
100 Args:
101 tag: str: label for this data
102 image: ndarray: [H,W], [H,W,1], [H,W,3], [K,H,W], [K,H,W,1], [K,H,W,3]
103 Save image in greyscale or colors.
104 Pixel values could be either uint8 or float.
105 Floating point values should be in range [0, 1).
106 step: int: training step
107 max_outputs: At most this many images will be emitted at each step.
108 Defaults to 3.
109 """
110 image = np.array(image)
111 # tf.summary.image expects image to have shape [k, h, w, c] where,
112 # k = number of samples, h = height, w = width, c = number of channels.
113 if len(np.shape(image)) == 2:
114 image = image[np.newaxis, :, :, np.newaxis]
115 elif len(np.shape(image)) == 3:
116 # this could be either [k, h, w] or [h, w, c]
117 if np.shape(image)[-1] in (1, 3):
118 image = image[np.newaxis, :, :, :]
119 else:
120 image = image[:, :, :, np.newaxis]
121 if np.shape(image)[-1] == 1:
122 image = np.repeat(image, 3, axis=-1)
123
124 # Convert to tensor value as tf.summary.image expects data to be a tensor.
125 image = tf.convert_to_tensor(image)
126 with self._event_writer.as_default():
127 tf.summary.image(name=tag, data=image, step=step, max_outputs=max_outputs)
128
129 def audio(self, tag, audiodata, step, sample_rate=44100, max_outputs=3):
130 """Saves audio as wave.
131
132 NB: single channel only right now.
133
134 Args:
135 tag: str: label for this data
136 audiodata: ndarray [Nsamples, Nframes, Nchannels]: audio data to
137 be saved as wave. The data will be clipped to [-1.0, 1.0].
138 step: int: training step
139 sample_rate: sample rate of passed in audio buffer
140 max_outputs: At most this many audio clips will be emitted at each
141 step. Defaults to 3.
142 """
143 # tf.summary.audio expects the audio data to have floating values in
144 # [-1.0, 1.0].
145 audiodata = np.clip(np.array(audiodata), -1, 1)
146
147 # Convert to tensor value as tf.summary.audio expects data to be a tensor.
148 audio = tf.convert_to_tensor(audiodata, dtype=tf.float32)
149 with self._event_writer.as_default():
150 tf.summary.audio(
151 name=tag, data=audio, sample_rate=sample_rate, step=step,
152 max_outputs=max_outputs, encoding='wav')
153
154 def histogram(self, tag, values, step, bins=None):
155 """Saves histogram of values.
156
157 Args:
158 tag: str: label for this data
159 values: ndarray: will be flattened by this routine
160 step: int: training step
161 bins: number of bins in histogram
162 """
163 values = np.array(values)
164 values = np.reshape(values, -1)
165 with self._event_writer.as_default():
166 tf.summary.histogram(name=tag, data=values, step=step, buckets=bins)
167
168 def text(self, tag, textdata, step):
169 """Saves a text summary.
170
171 Args:
172 tag: str: label for this data
173 textdata: string
174 step: int: training step
175 Note: markdown formatting is rendered by tensorboard.
176 """
177 if not isinstance(textdata, (str, bytes)):
178 raise ValueError('`textdata` should be of the type `str` or `bytes`.')
179 with self._event_writer.as_default():
180 tf.summary.text(name=tag, data=tf.constant(textdata), step=step)
181
182 def write(self, tag, tensor, step, metadata=None):
183 """Saves a arbitrary tensor summary.
184
185 Useful when working with custom plugins or constructing a summary directly.
186
187 Args:
188 tag: str: label for this data
189 tensor: ndarray: tensor data to save.
190 step: int: training step
191 metadata: Optional SummaryMetadata, as a proto or serialized bytes.
192 Note: markdown formatting is rendered by tensorboard.
193 """
194 with self._event_writer.as_default():
195 tf.summary.write(
196 tag=tag,
197 tensor=tensor,
198 step=step,
199 metadata=metadata)
200
201 def hparams(self, hparams):
202 """Saves hyper parameters.
203
204 Args:
205 hparams: Flat mapping from hyper parameter name to value.
206 """
207
208 with self._event_writer.as_default():
209 hparams_api.hparams(hparams=_flatten_dict(hparams))
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/metrics/tensorboard.py b/flax/metrics/tensorboard.py
--- a/flax/metrics/tensorboard.py
+++ b/flax/metrics/tensorboard.py
@@ -39,18 +39,17 @@
for k, v in input_dict.items():
new_key = parent_key + sep + k if parent_key else k
- # Take special care of things hparams cannot handle.
- if v is None:
- v = 'None'
- elif isinstance(v, list):
- v = str(v)
- elif isinstance(v, tuple):
- v = str(v)
- elif isinstance(v, dict):
+ # Valid types according to https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py
+ valid_types = (bool, int, float, str, np.bool_, np.integer, np.floating, np.character)
+
+ if isinstance(v, dict):
# Recursively flatten the dict.
items.extend(_flatten_dict(v, new_key, sep=sep).items())
- else:
- items.append((new_key, v))
+ continue
+ elif not isinstance(v, valid_types):
+ # Cast any incompatible values as strings such that they can be handled by hparams
+ v = str(v)
+ items.append((new_key, v))
return dict(items)
| {"golden_diff": "diff --git a/flax/metrics/tensorboard.py b/flax/metrics/tensorboard.py\n--- a/flax/metrics/tensorboard.py\n+++ b/flax/metrics/tensorboard.py\n@@ -39,18 +39,17 @@\n for k, v in input_dict.items():\n new_key = parent_key + sep + k if parent_key else k\n \n- # Take special care of things hparams cannot handle.\n- if v is None:\n- v = 'None'\n- elif isinstance(v, list):\n- v = str(v)\n- elif isinstance(v, tuple):\n- v = str(v)\n- elif isinstance(v, dict):\n+ # Valid types according to https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py\n+ valid_types = (bool, int, float, str, np.bool_, np.integer, np.floating, np.character)\n+\n+ if isinstance(v, dict):\n # Recursively flatten the dict.\n items.extend(_flatten_dict(v, new_key, sep=sep).items())\n- else:\n- items.append((new_key, v))\n+ continue\n+ elif not isinstance(v, valid_types):\n+ # Cast any incompatible values as strings such that they can be handled by hparams\n+ v = str(v)\n+ items.append((new_key, v))\n return dict(items)\n", "issue": "Incompatible variables for Tensorboard hparams are recast to strings but never returned\n### Core Problem\r\nTensorboard hparams only supports a subset of Python and Numpy variable types ([see hparams docstrings](https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py)). The `flax.metrics.tensorboard.SummaryWriter` class's method `SummaryWriter.hparams()` should handle this behavior via the `flax.metrics.tensorboard._flatten_dict()` function, casting incompatible types to strings (which hparams supports). However, despite performing the casting operation, the `_flatten_dict` function does not append the recast variables to the dictionary it returns. \r\n\r\nThe result, for the below example, is that the \"hidden_layers\" parameters are silently excluded and do not appear in Tensorboard's hparams.\r\n\r\n```Python \r\nfrom flax.metrics import tensorboard\r\n\r\nexperiment_dir = \"./Example\"\r\n\r\nnetwork_hyperparameters = {\r\n \"hidden_layers_list\": [12,12],\r\n \"hidden_layers_tuple\": (12,12),\r\n \"dropout_rate\": 1.0,\r\n}\r\n\r\nsummary_writer = tensorboard.SummaryWriter(experiment_dir)\r\nsummary_writer.hparams(network_hyperparameters)\r\nsummary_writer.scalar('Training loss', 0.1, 1)\r\nsummary_writer.flush()\r\n```\r\n\r\n### Colab Example:\r\n[Example notebook](https://colab.research.google.com/gist/tttc3/8dd7ef04c4222bc18fb03b043d370120/falx_tensorboard_issue_demo.ipynb)\r\n\r\n### Proposed fix\r\nModify `_flattened_dict` to explicitly check if a dictionary value is one of those supported by Tensorboard's hparams api, as defined [here](https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py). 
If the value is not supported, cast it to a string and append it to the dictionary that `_flattened_dict` normally returns.\r\n\r\n**Current _flatten_dict code**\r\n```Python\r\ndef _flatten_dict(input_dict, parent_key='', sep='.'):\r\n \"\"\"Flattens and simplifies dict such that it can be used by hparams.\r\n\r\n Args:\r\n input_dict: Input dict, e.g., from ConfigDict.\r\n parent_key: String used in recursion.\r\n sep: String used to separate parent and child keys.\r\n\r\n Returns:\r\n Flattened dict.\r\n \"\"\"\r\n items = []\r\n for k, v in input_dict.items():\r\n new_key = parent_key + sep + k if parent_key else k\r\n\r\n # Take special care of things hparams cannot handle.\r\n if v is None:\r\n v = 'None'\r\n elif isinstance(v, list):\r\n v = str(v)\r\n elif isinstance(v, tuple):\r\n v = str(v)\r\n elif isinstance(v, dict):\r\n # Recursively flatten the dict.\r\n items.extend(_flatten_dict(v, new_key, sep=sep).items())\r\n else:\r\n items.append((new_key, v))\r\n return dict(items)\r\n```\r\n\r\n**Proposed _flatten_dict code modification**\r\n```Python\r\ndef _flatten_dict(input_dict, parent_key='', sep='.'):\r\n \"\"\"Flattens and simplifies dict such that it can be used by hparams.\r\n\r\n Args:\r\n input_dict: Input dict, e.g., from ConfigDict.\r\n parent_key: String used in recursion.\r\n sep: String used to separate parent and child keys.\r\n\r\n Returns:\r\n Flattened dict.\r\n \"\"\"\r\n items = []\r\n for k, v in input_dict.items():\r\n new_key = parent_key + sep + k if parent_key else k\r\n\r\n # Valid types according to https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py\r\n valid_types = (bool, int, float, str, np.bool_, np.integer, np.floating, np.character)\r\n\r\n if isinstance(v, dict):\r\n # Recursively flatten the dict.\r\n items.extend(_flatten_dict(v, new_key, sep=sep).items())\r\n continue\r\n elif not isinstance(v, valid_types):\r\n # Cast any incompatible values as strings such that they can be handled by hparams\r\n v = str(v)\r\n items.append((new_key, v))\r\n return dict(items)\r\n```\r\n\r\nI am happy submit a pull request with the modifications.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Write Summaries from JAX for use with Tensorboard.\n\"\"\"\n\nimport os\n\n# pylint: disable=g-import-not-at-top\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\nfrom tensorboard.plugins.hparams import api as hparams_api\n\n\ndef _flatten_dict(input_dict, parent_key='', sep='.'):\n \"\"\"Flattens and simplifies dict such that it can be used by hparams.\n\n Args:\n input_dict: Input dict, e.g., from ConfigDict.\n parent_key: String used in recursion.\n sep: String used to separate parent and child keys.\n\n Returns:\n Flattened dict.\n \"\"\"\n items = []\n for k, v in input_dict.items():\n new_key = parent_key + sep + k if parent_key else k\n\n # Take special care of things hparams cannot handle.\n if v is 
None:\n v = 'None'\n elif isinstance(v, list):\n v = str(v)\n elif isinstance(v, tuple):\n v = str(v)\n elif isinstance(v, dict):\n # Recursively flatten the dict.\n items.extend(_flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\nclass SummaryWriter(object):\n \"\"\"Saves data in event and summary protos for tensorboard.\"\"\"\n\n def __init__(self, log_dir):\n \"\"\"Create a new SummaryWriter.\n\n Args:\n log_dir: path to record tfevents files in.\n \"\"\"\n log_dir = os.fspath(log_dir)\n\n # If needed, create log_dir directory as well as missing parent directories.\n if not tf.io.gfile.isdir(log_dir):\n tf.io.gfile.makedirs(log_dir)\n\n self._event_writer = tf.summary.create_file_writer(log_dir, 10, 120, None)\n self._closed = False\n\n def close(self):\n \"\"\"Close SummaryWriter. Final!\"\"\"\n if not self._closed:\n self._event_writer.close()\n self._closed = True\n del self._event_writer\n\n def flush(self):\n self._event_writer.flush()\n\n def scalar(self, tag, value, step):\n \"\"\"Saves scalar value.\n\n Args:\n tag: str: label for this data\n value: int/float: number to log\n step: int: training step\n \"\"\"\n value = float(np.array(value))\n with self._event_writer.as_default():\n tf.summary.scalar(name=tag, data=value, step=step)\n\n def image(self, tag, image, step, max_outputs=3):\n \"\"\"Saves RGB image summary from np.ndarray [H,W], [H,W,1], or [H,W,3].\n\n Args:\n tag: str: label for this data\n image: ndarray: [H,W], [H,W,1], [H,W,3], [K,H,W], [K,H,W,1], [K,H,W,3]\n Save image in greyscale or colors.\n Pixel values could be either uint8 or float.\n Floating point values should be in range [0, 1).\n step: int: training step\n max_outputs: At most this many images will be emitted at each step.\n Defaults to 3.\n \"\"\"\n image = np.array(image)\n # tf.summary.image expects image to have shape [k, h, w, c] where,\n # k = number of samples, h = height, w = width, c = number of channels.\n if len(np.shape(image)) == 2:\n image = image[np.newaxis, :, :, np.newaxis]\n elif len(np.shape(image)) == 3:\n # this could be either [k, h, w] or [h, w, c]\n if np.shape(image)[-1] in (1, 3):\n image = image[np.newaxis, :, :, :]\n else:\n image = image[:, :, :, np.newaxis]\n if np.shape(image)[-1] == 1:\n image = np.repeat(image, 3, axis=-1)\n\n # Convert to tensor value as tf.summary.image expects data to be a tensor.\n image = tf.convert_to_tensor(image)\n with self._event_writer.as_default():\n tf.summary.image(name=tag, data=image, step=step, max_outputs=max_outputs)\n\n def audio(self, tag, audiodata, step, sample_rate=44100, max_outputs=3):\n \"\"\"Saves audio as wave.\n\n NB: single channel only right now.\n\n Args:\n tag: str: label for this data\n audiodata: ndarray [Nsamples, Nframes, Nchannels]: audio data to\n be saved as wave. The data will be clipped to [-1.0, 1.0].\n step: int: training step\n sample_rate: sample rate of passed in audio buffer\n max_outputs: At most this many audio clips will be emitted at each\n step. 
Defaults to 3.\n \"\"\"\n # tf.summary.audio expects the audio data to have floating values in\n # [-1.0, 1.0].\n audiodata = np.clip(np.array(audiodata), -1, 1)\n\n # Convert to tensor value as tf.summary.audio expects data to be a tensor.\n audio = tf.convert_to_tensor(audiodata, dtype=tf.float32)\n with self._event_writer.as_default():\n tf.summary.audio(\n name=tag, data=audio, sample_rate=sample_rate, step=step,\n max_outputs=max_outputs, encoding='wav')\n\n def histogram(self, tag, values, step, bins=None):\n \"\"\"Saves histogram of values.\n\n Args:\n tag: str: label for this data\n values: ndarray: will be flattened by this routine\n step: int: training step\n bins: number of bins in histogram\n \"\"\"\n values = np.array(values)\n values = np.reshape(values, -1)\n with self._event_writer.as_default():\n tf.summary.histogram(name=tag, data=values, step=step, buckets=bins)\n\n def text(self, tag, textdata, step):\n \"\"\"Saves a text summary.\n\n Args:\n tag: str: label for this data\n textdata: string\n step: int: training step\n Note: markdown formatting is rendered by tensorboard.\n \"\"\"\n if not isinstance(textdata, (str, bytes)):\n raise ValueError('`textdata` should be of the type `str` or `bytes`.')\n with self._event_writer.as_default():\n tf.summary.text(name=tag, data=tf.constant(textdata), step=step)\n\n def write(self, tag, tensor, step, metadata=None):\n \"\"\"Saves a arbitrary tensor summary.\n\n Useful when working with custom plugins or constructing a summary directly.\n\n Args:\n tag: str: label for this data\n tensor: ndarray: tensor data to save.\n step: int: training step\n metadata: Optional SummaryMetadata, as a proto or serialized bytes.\n Note: markdown formatting is rendered by tensorboard.\n \"\"\"\n with self._event_writer.as_default():\n tf.summary.write(\n tag=tag,\n tensor=tensor,\n step=step,\n metadata=metadata)\n\n def hparams(self, hparams):\n \"\"\"Saves hyper parameters.\n\n Args:\n hparams: Flat mapping from hyper parameter name to value.\n \"\"\"\n\n with self._event_writer.as_default():\n hparams_api.hparams(hparams=_flatten_dict(hparams))\n", "path": "flax/metrics/tensorboard.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Write Summaries from JAX for use with Tensorboard.\n\"\"\"\n\nimport os\n\n# pylint: disable=g-import-not-at-top\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\nfrom tensorboard.plugins.hparams import api as hparams_api\n\n\ndef _flatten_dict(input_dict, parent_key='', sep='.'):\n \"\"\"Flattens and simplifies dict such that it can be used by hparams.\n\n Args:\n input_dict: Input dict, e.g., from ConfigDict.\n parent_key: String used in recursion.\n sep: String used to separate parent and child keys.\n\n Returns:\n Flattened dict.\n \"\"\"\n items = []\n for k, v in input_dict.items():\n new_key = parent_key + sep + k if parent_key else k\n\n # Valid types according to 
https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py\n valid_types = (bool, int, float, str, np.bool_, np.integer, np.floating, np.character)\n\n if isinstance(v, dict):\n # Recursively flatten the dict.\n items.extend(_flatten_dict(v, new_key, sep=sep).items())\n continue\n elif not isinstance(v, valid_types):\n # Cast any incompatible values as strings such that they can be handled by hparams\n v = str(v)\n items.append((new_key, v))\n return dict(items)\n\n\nclass SummaryWriter(object):\n \"\"\"Saves data in event and summary protos for tensorboard.\"\"\"\n\n def __init__(self, log_dir):\n \"\"\"Create a new SummaryWriter.\n\n Args:\n log_dir: path to record tfevents files in.\n \"\"\"\n log_dir = os.fspath(log_dir)\n\n # If needed, create log_dir directory as well as missing parent directories.\n if not tf.io.gfile.isdir(log_dir):\n tf.io.gfile.makedirs(log_dir)\n\n self._event_writer = tf.summary.create_file_writer(log_dir, 10, 120, None)\n self._closed = False\n\n def close(self):\n \"\"\"Close SummaryWriter. Final!\"\"\"\n if not self._closed:\n self._event_writer.close()\n self._closed = True\n del self._event_writer\n\n def flush(self):\n self._event_writer.flush()\n\n def scalar(self, tag, value, step):\n \"\"\"Saves scalar value.\n\n Args:\n tag: str: label for this data\n value: int/float: number to log\n step: int: training step\n \"\"\"\n value = float(np.array(value))\n with self._event_writer.as_default():\n tf.summary.scalar(name=tag, data=value, step=step)\n\n def image(self, tag, image, step, max_outputs=3):\n \"\"\"Saves RGB image summary from np.ndarray [H,W], [H,W,1], or [H,W,3].\n\n Args:\n tag: str: label for this data\n image: ndarray: [H,W], [H,W,1], [H,W,3], [K,H,W], [K,H,W,1], [K,H,W,3]\n Save image in greyscale or colors.\n Pixel values could be either uint8 or float.\n Floating point values should be in range [0, 1).\n step: int: training step\n max_outputs: At most this many images will be emitted at each step.\n Defaults to 3.\n \"\"\"\n image = np.array(image)\n # tf.summary.image expects image to have shape [k, h, w, c] where,\n # k = number of samples, h = height, w = width, c = number of channels.\n if len(np.shape(image)) == 2:\n image = image[np.newaxis, :, :, np.newaxis]\n elif len(np.shape(image)) == 3:\n # this could be either [k, h, w] or [h, w, c]\n if np.shape(image)[-1] in (1, 3):\n image = image[np.newaxis, :, :, :]\n else:\n image = image[:, :, :, np.newaxis]\n if np.shape(image)[-1] == 1:\n image = np.repeat(image, 3, axis=-1)\n\n # Convert to tensor value as tf.summary.image expects data to be a tensor.\n image = tf.convert_to_tensor(image)\n with self._event_writer.as_default():\n tf.summary.image(name=tag, data=image, step=step, max_outputs=max_outputs)\n\n def audio(self, tag, audiodata, step, sample_rate=44100, max_outputs=3):\n \"\"\"Saves audio as wave.\n\n NB: single channel only right now.\n\n Args:\n tag: str: label for this data\n audiodata: ndarray [Nsamples, Nframes, Nchannels]: audio data to\n be saved as wave. The data will be clipped to [-1.0, 1.0].\n step: int: training step\n sample_rate: sample rate of passed in audio buffer\n max_outputs: At most this many audio clips will be emitted at each\n step. 
Defaults to 3.\n \"\"\"\n # tf.summary.audio expects the audio data to have floating values in\n # [-1.0, 1.0].\n audiodata = np.clip(np.array(audiodata), -1, 1)\n\n # Convert to tensor value as tf.summary.audio expects data to be a tensor.\n audio = tf.convert_to_tensor(audiodata, dtype=tf.float32)\n with self._event_writer.as_default():\n tf.summary.audio(\n name=tag, data=audio, sample_rate=sample_rate, step=step,\n max_outputs=max_outputs, encoding='wav')\n\n def histogram(self, tag, values, step, bins=None):\n \"\"\"Saves histogram of values.\n\n Args:\n tag: str: label for this data\n values: ndarray: will be flattened by this routine\n step: int: training step\n bins: number of bins in histogram\n \"\"\"\n values = np.array(values)\n values = np.reshape(values, -1)\n with self._event_writer.as_default():\n tf.summary.histogram(name=tag, data=values, step=step, buckets=bins)\n\n def text(self, tag, textdata, step):\n \"\"\"Saves a text summary.\n\n Args:\n tag: str: label for this data\n textdata: string\n step: int: training step\n Note: markdown formatting is rendered by tensorboard.\n \"\"\"\n if not isinstance(textdata, (str, bytes)):\n raise ValueError('`textdata` should be of the type `str` or `bytes`.')\n with self._event_writer.as_default():\n tf.summary.text(name=tag, data=tf.constant(textdata), step=step)\n\n def write(self, tag, tensor, step, metadata=None):\n \"\"\"Saves a arbitrary tensor summary.\n\n Useful when working with custom plugins or constructing a summary directly.\n\n Args:\n tag: str: label for this data\n tensor: ndarray: tensor data to save.\n step: int: training step\n metadata: Optional SummaryMetadata, as a proto or serialized bytes.\n Note: markdown formatting is rendered by tensorboard.\n \"\"\"\n with self._event_writer.as_default():\n tf.summary.write(\n tag=tag,\n tensor=tensor,\n step=step,\n metadata=metadata)\n\n def hparams(self, hparams):\n \"\"\"Saves hyper parameters.\n\n Args:\n hparams: Flat mapping from hyper parameter name to value.\n \"\"\"\n\n with self._event_writer.as_default():\n hparams_api.hparams(hparams=_flatten_dict(hparams))\n", "path": "flax/metrics/tensorboard.py"}]} | 3,590 | 347 |
gh_patches_debug_37954 | rasdani/github-patches | git_diff | dask__distributed-3854 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support different usernames when starting an SSH cluster
I wanted to start an SSH cluster where my username is not the same on all the nodes. It would be great if this was supported. The most obvious way to configure this would be to allow specifying hosts as `user@hostname`, either on the command line or in the hosts file.
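
For illustration only, a hypothetical helper (not part of `distributed`) could normalise `user@hostname` entries into a per-host username before anything is handed to `asyncssh.connect`:

```python
# Hypothetical sketch: split "user@hostname" entries so a per-host
# username could be forwarded to asyncssh.connect via connect_options.
def split_user_host(entry):
    user, _, host = entry.rpartition("@")
    return (user or None, host)

hosts = ["alice@scheduler-node", "bob@worker-1", "worker-2"]
print([split_user_host(h) for h in hosts])
# [('alice', 'scheduler-node'), ('bob', 'worker-1'), (None, 'worker-2')]
```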
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/deploy/ssh.py`
Content:
```
1 import logging
2 import sys
3 from typing import List
4 import warnings
5 import weakref
6
7 import dask
8
9 from .spec import SpecCluster, ProcessInterface
10 from ..utils import cli_keywords
11 from ..scheduler import Scheduler as _Scheduler
12 from ..worker import Worker as _Worker
13 from ..utils import serialize_for_cli
14
15 logger = logging.getLogger(__name__)
16
17
18 class Process(ProcessInterface):
19 """ A superclass for SSH Workers and Nannies
20
21 See Also
22 --------
23 Worker
24 Scheduler
25 """
26
27 def __init__(self, **kwargs):
28 self.connection = None
29 self.proc = None
30 super().__init__(**kwargs)
31
32 async def start(self):
33 assert self.connection
34 weakref.finalize(
35 self, self.proc.kill
36 ) # https://github.com/ronf/asyncssh/issues/112
37 await super().start()
38
39 async def close(self):
40 self.proc.kill() # https://github.com/ronf/asyncssh/issues/112
41 self.connection.close()
42 await super().close()
43
44 def __repr__(self):
45 return "<SSH %s: status=%s>" % (type(self).__name__, self.status)
46
47
48 class Worker(Process):
49 """ A Remote Dask Worker controled by SSH
50
51 Parameters
52 ----------
53 scheduler: str
54 The address of the scheduler
55 address: str
56 The hostname where we should run this worker
57 worker_module: str
58 The python module to run to start the worker.
59 connect_options: dict
60 kwargs to be passed to asyncssh connections
61 remote_python: str
62 Path to Python on remote node to run this worker.
63 kwargs: dict
64 These will be passed through the dask-worker CLI to the
65 dask.distributed.Worker class
66 """
67
68 def __init__(
69 self,
70 scheduler: str,
71 address: str,
72 connect_options: dict,
73 kwargs: dict,
74 worker_module="distributed.cli.dask_worker",
75 remote_python=None,
76 loop=None,
77 name=None,
78 ):
79 super().__init__()
80
81 self.address = address
82 self.scheduler = scheduler
83 self.worker_module = worker_module
84 self.connect_options = connect_options
85 self.kwargs = kwargs
86 self.name = name
87 self.remote_python = remote_python
88
89 async def start(self):
90 import asyncssh # import now to avoid adding to module startup time
91
92 self.connection = await asyncssh.connect(self.address, **self.connect_options)
93
94 result = await self.connection.run("uname")
95 if result.exit_status == 0:
96 set_env = 'env DASK_INTERNAL_INHERIT_CONFIG="{}"'.format(
97 serialize_for_cli(dask.config.global_config)
98 )
99 else:
100 result = await self.connection.run("cmd /c ver")
101 if result.exit_status == 0:
102 set_env = "set DASK_INTERNAL_INHERIT_CONFIG={} &&".format(
103 serialize_for_cli(dask.config.global_config)
104 )
105 else:
106 raise Exception(
107 "Worker failed to set DASK_INTERNAL_INHERIT_CONFIG variable "
108 )
109
110 cmd = " ".join(
111 [
112 set_env,
113 self.remote_python or sys.executable,
114 "-m",
115 self.worker_module,
116 self.scheduler,
117 "--name",
118 str(self.name),
119 ]
120 + cli_keywords(self.kwargs, cls=_Worker, cmd=self.worker_module)
121 )
122
123 self.proc = await self.connection.create_process(cmd)
124
125 # We watch stderr in order to get the address, then we return
126 while True:
127 line = await self.proc.stderr.readline()
128 if not line.strip():
129 raise Exception("Worker failed to start")
130 logger.info(line.strip())
131 if "worker at" in line:
132 self.address = line.split("worker at:")[1].strip()
133 self.status = "running"
134 break
135 logger.debug("%s", line)
136 await super().start()
137
138
139 class Scheduler(Process):
140 """ A Remote Dask Scheduler controlled by SSH
141
142 Parameters
143 ----------
144 address: str
145 The hostname where we should run this worker
146 connect_options: dict
147 kwargs to be passed to asyncssh connections
148 remote_python: str
149 Path to Python on remote node to run this scheduler.
150 kwargs: dict
151 These will be passed through the dask-scheduler CLI to the
152 dask.distributed.Scheduler class
153 """
154
155 def __init__(
156 self, address: str, connect_options: dict, kwargs: dict, remote_python=None
157 ):
158 super().__init__()
159
160 self.address = address
161 self.kwargs = kwargs
162 self.connect_options = connect_options
163 self.remote_python = remote_python
164
165 async def start(self):
166 import asyncssh # import now to avoid adding to module startup time
167
168 logger.debug("Created Scheduler Connection")
169
170 self.connection = await asyncssh.connect(self.address, **self.connect_options)
171
172 result = await self.connection.run("uname")
173 if result.exit_status == 0:
174 set_env = 'env DASK_INTERNAL_INHERIT_CONFIG="{}"'.format(
175 serialize_for_cli(dask.config.global_config)
176 )
177 else:
178 result = await self.connection.run("cmd /c ver")
179 if result.exit_status == 0:
180 set_env = "set DASK_INTERNAL_INHERIT_CONFIG={} &&".format(
181 serialize_for_cli(dask.config.global_config)
182 )
183 else:
184 raise Exception(
185 "Scheduler failed to set DASK_INTERNAL_INHERIT_CONFIG variable "
186 )
187
188 cmd = " ".join(
189 [
190 set_env,
191 self.remote_python or sys.executable,
192 "-m",
193 "distributed.cli.dask_scheduler",
194 ]
195 + cli_keywords(self.kwargs, cls=_Scheduler)
196 )
197 self.proc = await self.connection.create_process(cmd)
198
199 # We watch stderr in order to get the address, then we return
200 while True:
201 line = await self.proc.stderr.readline()
202 if not line.strip():
203 raise Exception("Worker failed to start")
204 logger.info(line.strip())
205 if "Scheduler at" in line:
206 self.address = line.split("Scheduler at:")[1].strip()
207 break
208 logger.debug("%s", line)
209 await super().start()
210
211
212 old_cluster_kwargs = {
213 "scheduler_addr",
214 "scheduler_port",
215 "worker_addrs",
216 "nthreads",
217 "nprocs",
218 "ssh_username",
219 "ssh_port",
220 "ssh_private_key",
221 "nohost",
222 "logdir",
223 "remote_python",
224 "memory_limit",
225 "worker_port",
226 "nanny_port",
227 "remote_dask_worker",
228 }
229
230
231 def SSHCluster(
232 hosts: List[str] = None,
233 connect_options: dict = {},
234 worker_options: dict = {},
235 scheduler_options: dict = {},
236 worker_module: str = "distributed.cli.dask_worker",
237 remote_python: str = None,
238 **kwargs
239 ):
240 """ Deploy a Dask cluster using SSH
241
242 The SSHCluster function deploys a Dask Scheduler and Workers for you on a
243 set of machine addresses that you provide. The first address will be used
244 for the scheduler while the rest will be used for the workers (feel free to
245 repeat the first hostname if you want to have the scheduler and worker
246 co-habitate one machine.)
247
248 You may configure the scheduler and workers by passing
249 ``scheduler_options`` and ``worker_options`` dictionary keywords. See the
250 ``dask.distributed.Scheduler`` and ``dask.distributed.Worker`` classes for
251 details on the available options, but the defaults should work in most
252 situations.
253
254 You may configure your use of SSH itself using the ``connect_options``
255 keyword, which passes values to the ``asyncssh.connect`` function. For
256 more information on these see the documentation for the ``asyncssh``
257 library https://asyncssh.readthedocs.io .
258
259 Parameters
260 ----------
261 hosts: List[str]
262 List of hostnames or addresses on which to launch our cluster.
263 The first will be used for the scheduler and the rest for workers.
264 connect_options: dict, optional
265 Keywords to pass through to ``asyncssh.connect``.
266 worker_options: dict, optional
267 Keywords to pass on to workers.
268 scheduler_options: dict, optional
269 Keywords to pass on to scheduler.
270 worker_module: str, optional
271 Python module to call to start the worker.
272 remote_python: str, optional
273 Path to Python on remote nodes.
274
275 Examples
276 --------
277 >>> from dask.distributed import Client, SSHCluster
278 >>> cluster = SSHCluster(
279 ... ["localhost", "localhost", "localhost", "localhost"],
280 ... connect_options={"known_hosts": None},
281 ... worker_options={"nthreads": 2},
282 ... scheduler_options={"port": 0, "dashboard_address": ":8797"}
283 ... )
284 >>> client = Client(cluster)
285
286 An example using a different worker module, in particular the
287 ``dask-cuda-worker`` command from the ``dask-cuda`` project.
288
289 >>> from dask.distributed import Client, SSHCluster
290 >>> cluster = SSHCluster(
291 ... ["localhost", "hostwithgpus", "anothergpuhost"],
292 ... connect_options={"known_hosts": None},
293 ... scheduler_options={"port": 0, "dashboard_address": ":8797"},
294 ... worker_module='dask_cuda.dask_cuda_worker')
295 >>> client = Client(cluster)
296
297 See Also
298 --------
299 dask.distributed.Scheduler
300 dask.distributed.Worker
301 asyncssh.connect
302 """
303 if set(kwargs) & old_cluster_kwargs:
304 from .old_ssh import SSHCluster as OldSSHCluster
305
306 warnings.warn(
307 "Note that the SSHCluster API has been replaced. "
308 "We're routing you to the older implementation. "
309 "This will be removed in the future"
310 )
311 kwargs.setdefault("worker_addrs", hosts)
312 return OldSSHCluster(**kwargs)
313
314 scheduler = {
315 "cls": Scheduler,
316 "options": {
317 "address": hosts[0],
318 "connect_options": connect_options,
319 "kwargs": scheduler_options,
320 "remote_python": remote_python,
321 },
322 }
323 workers = {
324 i: {
325 "cls": Worker,
326 "options": {
327 "address": host,
328 "connect_options": connect_options,
329 "kwargs": worker_options,
330 "worker_module": worker_module,
331 "remote_python": remote_python,
332 },
333 }
334 for i, host in enumerate(hosts[1:])
335 }
336 return SpecCluster(workers, scheduler, name="SSHCluster", **kwargs)
337
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/deploy/ssh.py b/distributed/deploy/ssh.py
--- a/distributed/deploy/ssh.py
+++ b/distributed/deploy/ssh.py
@@ -1,6 +1,6 @@
import logging
import sys
-from typing import List
+from typing import List, Union
import warnings
import weakref
@@ -230,7 +230,7 @@
def SSHCluster(
hosts: List[str] = None,
- connect_options: dict = {},
+ connect_options: Union[List[dict], dict] = {},
worker_options: dict = {},
scheduler_options: dict = {},
worker_module: str = "distributed.cli.dask_worker",
@@ -261,8 +261,9 @@
hosts: List[str]
List of hostnames or addresses on which to launch our cluster.
The first will be used for the scheduler and the rest for workers.
- connect_options: dict, optional
+ connect_options: dict or list of dict, optional
Keywords to pass through to ``asyncssh.connect``.
+ If a list it must have the same length as ``hosts``.
worker_options: dict, optional
Keywords to pass on to workers.
scheduler_options: dict, optional
@@ -311,11 +312,19 @@
kwargs.setdefault("worker_addrs", hosts)
return OldSSHCluster(**kwargs)
+ if isinstance(connect_options, list) and len(connect_options) != len(hosts):
+ raise RuntimeError(
+ "When specifying a list of connect_options you must provide a "
+ "dictionary for each address."
+ )
+
scheduler = {
"cls": Scheduler,
"options": {
"address": hosts[0],
- "connect_options": connect_options,
+ "connect_options": connect_options
+ if isinstance(connect_options, dict)
+ else connect_options[0],
"kwargs": scheduler_options,
"remote_python": remote_python,
},
@@ -325,7 +334,9 @@
"cls": Worker,
"options": {
"address": host,
- "connect_options": connect_options,
+ "connect_options": connect_options
+ if isinstance(connect_options, dict)
+ else connect_options[i + 1],
"kwargs": worker_options,
"worker_module": worker_module,
"remote_python": remote_python,
| {"golden_diff": "diff --git a/distributed/deploy/ssh.py b/distributed/deploy/ssh.py\n--- a/distributed/deploy/ssh.py\n+++ b/distributed/deploy/ssh.py\n@@ -1,6 +1,6 @@\n import logging\n import sys\n-from typing import List\n+from typing import List, Union\n import warnings\n import weakref\n \n@@ -230,7 +230,7 @@\n \n def SSHCluster(\n hosts: List[str] = None,\n- connect_options: dict = {},\n+ connect_options: Union[List[dict], dict] = {},\n worker_options: dict = {},\n scheduler_options: dict = {},\n worker_module: str = \"distributed.cli.dask_worker\",\n@@ -261,8 +261,9 @@\n hosts: List[str]\n List of hostnames or addresses on which to launch our cluster.\n The first will be used for the scheduler and the rest for workers.\n- connect_options: dict, optional\n+ connect_options: dict or list of dict, optional\n Keywords to pass through to ``asyncssh.connect``.\n+ If a list it must have the same length as ``hosts``.\n worker_options: dict, optional\n Keywords to pass on to workers.\n scheduler_options: dict, optional\n@@ -311,11 +312,19 @@\n kwargs.setdefault(\"worker_addrs\", hosts)\n return OldSSHCluster(**kwargs)\n \n+ if isinstance(connect_options, list) and len(connect_options) != len(hosts):\n+ raise RuntimeError(\n+ \"When specifying a list of connect_options you must provide a \"\n+ \"dictionary for each address.\"\n+ )\n+\n scheduler = {\n \"cls\": Scheduler,\n \"options\": {\n \"address\": hosts[0],\n- \"connect_options\": connect_options,\n+ \"connect_options\": connect_options\n+ if isinstance(connect_options, dict)\n+ else connect_options[0],\n \"kwargs\": scheduler_options,\n \"remote_python\": remote_python,\n },\n@@ -325,7 +334,9 @@\n \"cls\": Worker,\n \"options\": {\n \"address\": host,\n- \"connect_options\": connect_options,\n+ \"connect_options\": connect_options\n+ if isinstance(connect_options, dict)\n+ else connect_options[i + 1],\n \"kwargs\": worker_options,\n \"worker_module\": worker_module,\n \"remote_python\": remote_python,\n", "issue": "Support different usernames when starting an SSH cluster\nI wanted to start an SSH cluster where my username is not same on all the nodes. It would be great if this was supported. 
The most obvious way to configure this would be to allow specifying hosts as `user@hostname`, either on the command line, or the hosts file.\n", "before_files": [{"content": "import logging\nimport sys\nfrom typing import List\nimport warnings\nimport weakref\n\nimport dask\n\nfrom .spec import SpecCluster, ProcessInterface\nfrom ..utils import cli_keywords\nfrom ..scheduler import Scheduler as _Scheduler\nfrom ..worker import Worker as _Worker\nfrom ..utils import serialize_for_cli\n\nlogger = logging.getLogger(__name__)\n\n\nclass Process(ProcessInterface):\n \"\"\" A superclass for SSH Workers and Nannies\n\n See Also\n --------\n Worker\n Scheduler\n \"\"\"\n\n def __init__(self, **kwargs):\n self.connection = None\n self.proc = None\n super().__init__(**kwargs)\n\n async def start(self):\n assert self.connection\n weakref.finalize(\n self, self.proc.kill\n ) # https://github.com/ronf/asyncssh/issues/112\n await super().start()\n\n async def close(self):\n self.proc.kill() # https://github.com/ronf/asyncssh/issues/112\n self.connection.close()\n await super().close()\n\n def __repr__(self):\n return \"<SSH %s: status=%s>\" % (type(self).__name__, self.status)\n\n\nclass Worker(Process):\n \"\"\" A Remote Dask Worker controled by SSH\n\n Parameters\n ----------\n scheduler: str\n The address of the scheduler\n address: str\n The hostname where we should run this worker\n worker_module: str\n The python module to run to start the worker.\n connect_options: dict\n kwargs to be passed to asyncssh connections\n remote_python: str\n Path to Python on remote node to run this worker.\n kwargs: dict\n These will be passed through the dask-worker CLI to the\n dask.distributed.Worker class\n \"\"\"\n\n def __init__(\n self,\n scheduler: str,\n address: str,\n connect_options: dict,\n kwargs: dict,\n worker_module=\"distributed.cli.dask_worker\",\n remote_python=None,\n loop=None,\n name=None,\n ):\n super().__init__()\n\n self.address = address\n self.scheduler = scheduler\n self.worker_module = worker_module\n self.connect_options = connect_options\n self.kwargs = kwargs\n self.name = name\n self.remote_python = remote_python\n\n async def start(self):\n import asyncssh # import now to avoid adding to module startup time\n\n self.connection = await asyncssh.connect(self.address, **self.connect_options)\n\n result = await self.connection.run(\"uname\")\n if result.exit_status == 0:\n set_env = 'env DASK_INTERNAL_INHERIT_CONFIG=\"{}\"'.format(\n serialize_for_cli(dask.config.global_config)\n )\n else:\n result = await self.connection.run(\"cmd /c ver\")\n if result.exit_status == 0:\n set_env = \"set DASK_INTERNAL_INHERIT_CONFIG={} &&\".format(\n serialize_for_cli(dask.config.global_config)\n )\n else:\n raise Exception(\n \"Worker failed to set DASK_INTERNAL_INHERIT_CONFIG variable \"\n )\n\n cmd = \" \".join(\n [\n set_env,\n self.remote_python or sys.executable,\n \"-m\",\n self.worker_module,\n self.scheduler,\n \"--name\",\n str(self.name),\n ]\n + cli_keywords(self.kwargs, cls=_Worker, cmd=self.worker_module)\n )\n\n self.proc = await self.connection.create_process(cmd)\n\n # We watch stderr in order to get the address, then we return\n while True:\n line = await self.proc.stderr.readline()\n if not line.strip():\n raise Exception(\"Worker failed to start\")\n logger.info(line.strip())\n if \"worker at\" in line:\n self.address = line.split(\"worker at:\")[1].strip()\n self.status = \"running\"\n break\n logger.debug(\"%s\", line)\n await super().start()\n\n\nclass Scheduler(Process):\n 
\"\"\" A Remote Dask Scheduler controlled by SSH\n\n Parameters\n ----------\n address: str\n The hostname where we should run this worker\n connect_options: dict\n kwargs to be passed to asyncssh connections\n remote_python: str\n Path to Python on remote node to run this scheduler.\n kwargs: dict\n These will be passed through the dask-scheduler CLI to the\n dask.distributed.Scheduler class\n \"\"\"\n\n def __init__(\n self, address: str, connect_options: dict, kwargs: dict, remote_python=None\n ):\n super().__init__()\n\n self.address = address\n self.kwargs = kwargs\n self.connect_options = connect_options\n self.remote_python = remote_python\n\n async def start(self):\n import asyncssh # import now to avoid adding to module startup time\n\n logger.debug(\"Created Scheduler Connection\")\n\n self.connection = await asyncssh.connect(self.address, **self.connect_options)\n\n result = await self.connection.run(\"uname\")\n if result.exit_status == 0:\n set_env = 'env DASK_INTERNAL_INHERIT_CONFIG=\"{}\"'.format(\n serialize_for_cli(dask.config.global_config)\n )\n else:\n result = await self.connection.run(\"cmd /c ver\")\n if result.exit_status == 0:\n set_env = \"set DASK_INTERNAL_INHERIT_CONFIG={} &&\".format(\n serialize_for_cli(dask.config.global_config)\n )\n else:\n raise Exception(\n \"Scheduler failed to set DASK_INTERNAL_INHERIT_CONFIG variable \"\n )\n\n cmd = \" \".join(\n [\n set_env,\n self.remote_python or sys.executable,\n \"-m\",\n \"distributed.cli.dask_scheduler\",\n ]\n + cli_keywords(self.kwargs, cls=_Scheduler)\n )\n self.proc = await self.connection.create_process(cmd)\n\n # We watch stderr in order to get the address, then we return\n while True:\n line = await self.proc.stderr.readline()\n if not line.strip():\n raise Exception(\"Worker failed to start\")\n logger.info(line.strip())\n if \"Scheduler at\" in line:\n self.address = line.split(\"Scheduler at:\")[1].strip()\n break\n logger.debug(\"%s\", line)\n await super().start()\n\n\nold_cluster_kwargs = {\n \"scheduler_addr\",\n \"scheduler_port\",\n \"worker_addrs\",\n \"nthreads\",\n \"nprocs\",\n \"ssh_username\",\n \"ssh_port\",\n \"ssh_private_key\",\n \"nohost\",\n \"logdir\",\n \"remote_python\",\n \"memory_limit\",\n \"worker_port\",\n \"nanny_port\",\n \"remote_dask_worker\",\n}\n\n\ndef SSHCluster(\n hosts: List[str] = None,\n connect_options: dict = {},\n worker_options: dict = {},\n scheduler_options: dict = {},\n worker_module: str = \"distributed.cli.dask_worker\",\n remote_python: str = None,\n **kwargs\n):\n \"\"\" Deploy a Dask cluster using SSH\n\n The SSHCluster function deploys a Dask Scheduler and Workers for you on a\n set of machine addresses that you provide. The first address will be used\n for the scheduler while the rest will be used for the workers (feel free to\n repeat the first hostname if you want to have the scheduler and worker\n co-habitate one machine.)\n\n You may configure the scheduler and workers by passing\n ``scheduler_options`` and ``worker_options`` dictionary keywords. See the\n ``dask.distributed.Scheduler`` and ``dask.distributed.Worker`` classes for\n details on the available options, but the defaults should work in most\n situations.\n\n You may configure your use of SSH itself using the ``connect_options``\n keyword, which passes values to the ``asyncssh.connect`` function. 
For\n more information on these see the documentation for the ``asyncssh``\n library https://asyncssh.readthedocs.io .\n\n Parameters\n ----------\n hosts: List[str]\n List of hostnames or addresses on which to launch our cluster.\n The first will be used for the scheduler and the rest for workers.\n connect_options: dict, optional\n Keywords to pass through to ``asyncssh.connect``.\n worker_options: dict, optional\n Keywords to pass on to workers.\n scheduler_options: dict, optional\n Keywords to pass on to scheduler.\n worker_module: str, optional\n Python module to call to start the worker.\n remote_python: str, optional\n Path to Python on remote nodes.\n\n Examples\n --------\n >>> from dask.distributed import Client, SSHCluster\n >>> cluster = SSHCluster(\n ... [\"localhost\", \"localhost\", \"localhost\", \"localhost\"],\n ... connect_options={\"known_hosts\": None},\n ... worker_options={\"nthreads\": 2},\n ... scheduler_options={\"port\": 0, \"dashboard_address\": \":8797\"}\n ... )\n >>> client = Client(cluster)\n\n An example using a different worker module, in particular the\n ``dask-cuda-worker`` command from the ``dask-cuda`` project.\n\n >>> from dask.distributed import Client, SSHCluster\n >>> cluster = SSHCluster(\n ... [\"localhost\", \"hostwithgpus\", \"anothergpuhost\"],\n ... connect_options={\"known_hosts\": None},\n ... scheduler_options={\"port\": 0, \"dashboard_address\": \":8797\"},\n ... worker_module='dask_cuda.dask_cuda_worker')\n >>> client = Client(cluster)\n\n See Also\n --------\n dask.distributed.Scheduler\n dask.distributed.Worker\n asyncssh.connect\n \"\"\"\n if set(kwargs) & old_cluster_kwargs:\n from .old_ssh import SSHCluster as OldSSHCluster\n\n warnings.warn(\n \"Note that the SSHCluster API has been replaced. \"\n \"We're routing you to the older implementation. 
\"\n \"This will be removed in the future\"\n )\n kwargs.setdefault(\"worker_addrs\", hosts)\n return OldSSHCluster(**kwargs)\n\n scheduler = {\n \"cls\": Scheduler,\n \"options\": {\n \"address\": hosts[0],\n \"connect_options\": connect_options,\n \"kwargs\": scheduler_options,\n \"remote_python\": remote_python,\n },\n }\n workers = {\n i: {\n \"cls\": Worker,\n \"options\": {\n \"address\": host,\n \"connect_options\": connect_options,\n \"kwargs\": worker_options,\n \"worker_module\": worker_module,\n \"remote_python\": remote_python,\n },\n }\n for i, host in enumerate(hosts[1:])\n }\n return SpecCluster(workers, scheduler, name=\"SSHCluster\", **kwargs)\n", "path": "distributed/deploy/ssh.py"}], "after_files": [{"content": "import logging\nimport sys\nfrom typing import List, Union\nimport warnings\nimport weakref\n\nimport dask\n\nfrom .spec import SpecCluster, ProcessInterface\nfrom ..utils import cli_keywords\nfrom ..scheduler import Scheduler as _Scheduler\nfrom ..worker import Worker as _Worker\nfrom ..utils import serialize_for_cli\n\nlogger = logging.getLogger(__name__)\n\n\nclass Process(ProcessInterface):\n \"\"\" A superclass for SSH Workers and Nannies\n\n See Also\n --------\n Worker\n Scheduler\n \"\"\"\n\n def __init__(self, **kwargs):\n self.connection = None\n self.proc = None\n super().__init__(**kwargs)\n\n async def start(self):\n assert self.connection\n weakref.finalize(\n self, self.proc.kill\n ) # https://github.com/ronf/asyncssh/issues/112\n await super().start()\n\n async def close(self):\n self.proc.kill() # https://github.com/ronf/asyncssh/issues/112\n self.connection.close()\n await super().close()\n\n def __repr__(self):\n return \"<SSH %s: status=%s>\" % (type(self).__name__, self.status)\n\n\nclass Worker(Process):\n \"\"\" A Remote Dask Worker controled by SSH\n\n Parameters\n ----------\n scheduler: str\n The address of the scheduler\n address: str\n The hostname where we should run this worker\n worker_module: str\n The python module to run to start the worker.\n connect_options: dict\n kwargs to be passed to asyncssh connections\n remote_python: str\n Path to Python on remote node to run this worker.\n kwargs: dict\n These will be passed through the dask-worker CLI to the\n dask.distributed.Worker class\n \"\"\"\n\n def __init__(\n self,\n scheduler: str,\n address: str,\n connect_options: dict,\n kwargs: dict,\n worker_module=\"distributed.cli.dask_worker\",\n remote_python=None,\n loop=None,\n name=None,\n ):\n super().__init__()\n\n self.address = address\n self.scheduler = scheduler\n self.worker_module = worker_module\n self.connect_options = connect_options\n self.kwargs = kwargs\n self.name = name\n self.remote_python = remote_python\n\n async def start(self):\n import asyncssh # import now to avoid adding to module startup time\n\n self.connection = await asyncssh.connect(self.address, **self.connect_options)\n\n result = await self.connection.run(\"uname\")\n if result.exit_status == 0:\n set_env = 'env DASK_INTERNAL_INHERIT_CONFIG=\"{}\"'.format(\n serialize_for_cli(dask.config.global_config)\n )\n else:\n result = await self.connection.run(\"cmd /c ver\")\n if result.exit_status == 0:\n set_env = \"set DASK_INTERNAL_INHERIT_CONFIG={} &&\".format(\n serialize_for_cli(dask.config.global_config)\n )\n else:\n raise Exception(\n \"Worker failed to set DASK_INTERNAL_INHERIT_CONFIG variable \"\n )\n\n cmd = \" \".join(\n [\n set_env,\n self.remote_python or sys.executable,\n \"-m\",\n self.worker_module,\n self.scheduler,\n \"--name\",\n 
str(self.name),\n ]\n + cli_keywords(self.kwargs, cls=_Worker, cmd=self.worker_module)\n )\n\n self.proc = await self.connection.create_process(cmd)\n\n # We watch stderr in order to get the address, then we return\n while True:\n line = await self.proc.stderr.readline()\n if not line.strip():\n raise Exception(\"Worker failed to start\")\n logger.info(line.strip())\n if \"worker at\" in line:\n self.address = line.split(\"worker at:\")[1].strip()\n self.status = \"running\"\n break\n logger.debug(\"%s\", line)\n await super().start()\n\n\nclass Scheduler(Process):\n \"\"\" A Remote Dask Scheduler controlled by SSH\n\n Parameters\n ----------\n address: str\n The hostname where we should run this worker\n connect_options: dict\n kwargs to be passed to asyncssh connections\n remote_python: str\n Path to Python on remote node to run this scheduler.\n kwargs: dict\n These will be passed through the dask-scheduler CLI to the\n dask.distributed.Scheduler class\n \"\"\"\n\n def __init__(\n self, address: str, connect_options: dict, kwargs: dict, remote_python=None\n ):\n super().__init__()\n\n self.address = address\n self.kwargs = kwargs\n self.connect_options = connect_options\n self.remote_python = remote_python\n\n async def start(self):\n import asyncssh # import now to avoid adding to module startup time\n\n logger.debug(\"Created Scheduler Connection\")\n\n self.connection = await asyncssh.connect(self.address, **self.connect_options)\n\n result = await self.connection.run(\"uname\")\n if result.exit_status == 0:\n set_env = 'env DASK_INTERNAL_INHERIT_CONFIG=\"{}\"'.format(\n serialize_for_cli(dask.config.global_config)\n )\n else:\n result = await self.connection.run(\"cmd /c ver\")\n if result.exit_status == 0:\n set_env = \"set DASK_INTERNAL_INHERIT_CONFIG={} &&\".format(\n serialize_for_cli(dask.config.global_config)\n )\n else:\n raise Exception(\n \"Scheduler failed to set DASK_INTERNAL_INHERIT_CONFIG variable \"\n )\n\n cmd = \" \".join(\n [\n set_env,\n self.remote_python or sys.executable,\n \"-m\",\n \"distributed.cli.dask_scheduler\",\n ]\n + cli_keywords(self.kwargs, cls=_Scheduler)\n )\n self.proc = await self.connection.create_process(cmd)\n\n # We watch stderr in order to get the address, then we return\n while True:\n line = await self.proc.stderr.readline()\n if not line.strip():\n raise Exception(\"Worker failed to start\")\n logger.info(line.strip())\n if \"Scheduler at\" in line:\n self.address = line.split(\"Scheduler at:\")[1].strip()\n break\n logger.debug(\"%s\", line)\n await super().start()\n\n\nold_cluster_kwargs = {\n \"scheduler_addr\",\n \"scheduler_port\",\n \"worker_addrs\",\n \"nthreads\",\n \"nprocs\",\n \"ssh_username\",\n \"ssh_port\",\n \"ssh_private_key\",\n \"nohost\",\n \"logdir\",\n \"remote_python\",\n \"memory_limit\",\n \"worker_port\",\n \"nanny_port\",\n \"remote_dask_worker\",\n}\n\n\ndef SSHCluster(\n hosts: List[str] = None,\n connect_options: Union[List[dict], dict] = {},\n worker_options: dict = {},\n scheduler_options: dict = {},\n worker_module: str = \"distributed.cli.dask_worker\",\n remote_python: str = None,\n **kwargs\n):\n \"\"\" Deploy a Dask cluster using SSH\n\n The SSHCluster function deploys a Dask Scheduler and Workers for you on a\n set of machine addresses that you provide. 
The first address will be used\n for the scheduler while the rest will be used for the workers (feel free to\n repeat the first hostname if you want to have the scheduler and worker\n co-habitate one machine.)\n\n You may configure the scheduler and workers by passing\n ``scheduler_options`` and ``worker_options`` dictionary keywords. See the\n ``dask.distributed.Scheduler`` and ``dask.distributed.Worker`` classes for\n details on the available options, but the defaults should work in most\n situations.\n\n You may configure your use of SSH itself using the ``connect_options``\n keyword, which passes values to the ``asyncssh.connect`` function. For\n more information on these see the documentation for the ``asyncssh``\n library https://asyncssh.readthedocs.io .\n\n Parameters\n ----------\n hosts: List[str]\n List of hostnames or addresses on which to launch our cluster.\n The first will be used for the scheduler and the rest for workers.\n connect_options: dict or list of dict, optional\n Keywords to pass through to ``asyncssh.connect``.\n If a list it must have the same length as ``hosts``.\n worker_options: dict, optional\n Keywords to pass on to workers.\n scheduler_options: dict, optional\n Keywords to pass on to scheduler.\n worker_module: str, optional\n Python module to call to start the worker.\n remote_python: str, optional\n Path to Python on remote nodes.\n\n Examples\n --------\n >>> from dask.distributed import Client, SSHCluster\n >>> cluster = SSHCluster(\n ... [\"localhost\", \"localhost\", \"localhost\", \"localhost\"],\n ... connect_options={\"known_hosts\": None},\n ... worker_options={\"nthreads\": 2},\n ... scheduler_options={\"port\": 0, \"dashboard_address\": \":8797\"}\n ... )\n >>> client = Client(cluster)\n\n An example using a different worker module, in particular the\n ``dask-cuda-worker`` command from the ``dask-cuda`` project.\n\n >>> from dask.distributed import Client, SSHCluster\n >>> cluster = SSHCluster(\n ... [\"localhost\", \"hostwithgpus\", \"anothergpuhost\"],\n ... connect_options={\"known_hosts\": None},\n ... scheduler_options={\"port\": 0, \"dashboard_address\": \":8797\"},\n ... worker_module='dask_cuda.dask_cuda_worker')\n >>> client = Client(cluster)\n\n See Also\n --------\n dask.distributed.Scheduler\n dask.distributed.Worker\n asyncssh.connect\n \"\"\"\n if set(kwargs) & old_cluster_kwargs:\n from .old_ssh import SSHCluster as OldSSHCluster\n\n warnings.warn(\n \"Note that the SSHCluster API has been replaced. \"\n \"We're routing you to the older implementation. 
\"\n \"This will be removed in the future\"\n )\n kwargs.setdefault(\"worker_addrs\", hosts)\n return OldSSHCluster(**kwargs)\n\n if isinstance(connect_options, list) and len(connect_options) != len(hosts):\n raise RuntimeError(\n \"When specifying a list of connect_options you must provide a \"\n \"dictionary for each address.\"\n )\n\n scheduler = {\n \"cls\": Scheduler,\n \"options\": {\n \"address\": hosts[0],\n \"connect_options\": connect_options\n if isinstance(connect_options, dict)\n else connect_options[0],\n \"kwargs\": scheduler_options,\n \"remote_python\": remote_python,\n },\n }\n workers = {\n i: {\n \"cls\": Worker,\n \"options\": {\n \"address\": host,\n \"connect_options\": connect_options\n if isinstance(connect_options, dict)\n else connect_options[i + 1],\n \"kwargs\": worker_options,\n \"worker_module\": worker_module,\n \"remote_python\": remote_python,\n },\n }\n for i, host in enumerate(hosts[1:])\n }\n return SpecCluster(workers, scheduler, name=\"SSHCluster\", **kwargs)\n", "path": "distributed/deploy/ssh.py"}]} | 3,542 | 528 |
gh_patches_debug_15564 | rasdani/github-patches | git_diff | pytorch__ignite-1727 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[search] use algolia docsearch
## 📚 Documentation
While the built-in search works, having to type a query and hit Enter for results is not a great UX.
To make search interactive, we can use [Algolia DocSearch](https://docsearch.algolia.com/).
PyTorch also uses it for the search on its homepage.
--- END ISSUE ---
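For orientation, a minimal sketch of how DocSearch could be wired into a Sphinx `conf.py` (an illustration of the request above, not the project's actual change; the CDN paths follow the docsearch.js v2 convention, and `YOUR_API_KEY`/`YOUR_INDEX_NAME` are placeholders issued by Algolia):

```python
# Illustrative sketch for docs/source/conf.py -- not the project's actual change.
# html_css_files / html_js_files are standard Sphinx (>=1.8) options.
html_css_files = [
    "https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css",
]
html_js_files = [
    # assumed JS counterpart of the stylesheet above
    "https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js",
]

# The search box still needs a small init call, typically added via a template override:
#   docsearch({
#     apiKey: "YOUR_API_KEY",         // placeholder issued by Algolia
#     indexName: "YOUR_INDEX_NAME",   // placeholder
#     inputSelector: "#search-input"  // selector used by the theme's search box
#   });
```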
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/stable/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15 import os
16 import sys
17
18 sys.path.insert(0, os.path.abspath("../.."))
19 import ignite
20 import pytorch_sphinx_theme
21
22 from datetime import datetime
23
24 # -- Project information -----------------------------------------------------
25
26 project = "ignite"
27 author = "PyTorch-Ignite Contributors"
28 copyright = f"{datetime.now().year}, {author}"
29
30 # The short X.Y version
31 try:
32 version = os.environ["code_version"]
33 if "master" in version:
34 version = "master (" + ignite.__version__ + ")"
35 else:
36 version = version.replace("v", "")
37 except KeyError:
38 version = ignite.__version__
39
40 # The full version, including alpha/beta/rc tags
41 release = "master"
42
43
44 # -- General configuration ---------------------------------------------------
45
46 # If your documentation needs a minimal Sphinx version, state it here.
47 #
48 # needs_sphinx = '1.0'
49
50 # Add any Sphinx extension module names here, as strings. They can be
51 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
52 # ones.
53 extensions = [
54 "sphinx.ext.autodoc",
55 "sphinx.ext.autosummary",
56 "sphinx.ext.doctest",
57 "sphinx.ext.intersphinx",
58 "sphinx.ext.todo",
59 "sphinx.ext.coverage",
60 "sphinxcontrib.katex",
61 "sphinx.ext.napoleon",
62 "sphinx.ext.viewcode",
63 "sphinx.ext.autosectionlabel",
64 ]
65
66 # katex options
67 katex_prerender = True
68
69 # Add any paths that contain templates here, relative to this directory.
70 templates_path = ["_templates"]
71
72 # The suffix(es) of source filenames.
73 # You can specify multiple suffix as a list of string:
74 #
75 # source_suffix = ['.rst', '.md']
76 source_suffix = ".rst"
77
78 # The master toctree document.
79 master_doc = "index"
80
81 # The language for content autogenerated by Sphinx. Refer to documentation
82 # for a list of supported languages.
83 #
84 # This is also used if you do content translation via gettext catalogs.
85 # Usually you set "language" from the command line for these cases.
86 language = None
87
88 # List of patterns, relative to source directory, that match files and
89 # directories to ignore when looking for source files.
90 # This pattern also affects html_static_path and html_extra_path .
91 exclude_patterns = []
92
93 # The name of the Pygments (syntax highlighting) style to use.
94 pygments_style = "sphinx"
95
96
97 # -- Options for HTML output -------------------------------------------------
98
99 # The theme to use for HTML and HTML Help pages. See the documentation for
100 # a list of builtin themes.
101 #
102 html_theme = "pytorch_sphinx_theme"
103 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
104
105 html_theme_options = {
106 "canonical_url": "https://pytorch.org/ignite/index.html",
107 "collapse_navigation": False,
108 "display_version": True,
109 "logo_only": True,
110 "navigation_with_keys": True,
111 }
112
113 html_logo = "_templates/_static/img/ignite_logo.svg"
114
115 html_favicon = "_templates/_static/img/ignite_logomark.svg"
116
117 # Theme options are theme-specific and customize the look and feel of a theme
118 # further. For a list of options available for each theme, see the
119 # documentation.
120 #
121 # html_theme_options = {}
122
123 # Add any paths that contain custom static files (such as style sheets) here,
124 # relative to this directory. They are copied after the builtin static files,
125 # so a file named "default.css" will overwrite the builtin "default.css".
126 html_static_path = ["_static", "_templates/_static"]
127
128 html_context = {
129 "css_files": [
130 # 'https://fonts.googleapis.com/css?family=Lato',
131 # '_static/css/pytorch_theme.css'
132 "_static/css/ignite_theme.css"
133 ],
134 }
135
136
137 # -- Options for HTMLHelp output ---------------------------------------------
138
139 # Output file base name for HTML help builder.
140 htmlhelp_basename = "ignitedoc"
141
142
143 # -- Options for LaTeX output ------------------------------------------------
144
145 latex_elements = {
146 # The paper size ('letterpaper' or 'a4paper').
147 #
148 # 'papersize': 'letterpaper',
149 # The font size ('10pt', '11pt' or '12pt').
150 #
151 # 'pointsize': '10pt',
152 # Additional stuff for the LaTeX preamble.
153 #
154 # 'preamble': '',
155 # Latex figure (float) alignment
156 #
157 # 'figure_align': 'htbp',
158 }
159
160 # Grouping the document tree into LaTeX files. List of tuples
161 # (source start file, target name, title,
162 # author, documentclass [howto, manual, or own class]).
163 latex_documents = [
164 (master_doc, "ignite.tex", "ignite Documentation", "Torch Contributors", "manual"),
165 ]
166
167
168 # -- Options for manual page output ------------------------------------------
169
170 # One entry per manual page. List of tuples
171 # (source start file, name, description, authors, manual section).
172 man_pages = [(master_doc, "ignite", "ignite Documentation", [author], 1)]
173
174
175 # -- Options for Texinfo output ----------------------------------------------
176
177 # Grouping the document tree into Texinfo files. List of tuples
178 # (source start file, target name, title, author,
179 # dir menu entry, description, category)
180 texinfo_documents = [
181 (
182 master_doc,
183 "ignite",
184 "ignite Documentation",
185 author,
186 "ignite",
187 "One line description of project.",
188 "Miscellaneous",
189 ),
190 ]
191
192
193 # -- Extension configuration -------------------------------------------------
194
195 # -- Options for intersphinx extension ---------------------------------------
196
197 # Example configuration for intersphinx: refer to the Python standard library.
198 intersphinx_mapping = {
199 "python": ("https://docs.python.org/3", None),
200 "torch": ("https://pytorch.org/docs/stable/", None),
201 }
202
203 # -- Options for todo extension ----------------------------------------------
204
205 # If true, `todo` and `todoList` produce output, else they produce nothing.
206 todo_include_todos = True
207
208 # -- Type hints configs ------------------------------------------------------
209
210 autodoc_inherit_docstrings = True
211 autoclass_content = "both"
212 autodoc_typehints = "description"
213 napoleon_attr_annotations = True
214
215 # -- A patch that turns-off cross refs for type annotations ------------------
216
217 import sphinx.domains.python
218 from docutils import nodes
219 from sphinx import addnodes
220
221 # replaces pending_xref node with desc_type for type annotations
222 sphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type("", nodes.Text(t))
223
224 # -- Autosummary patch to get list of a classes, funcs automatically ----------
225
226 from importlib import import_module
227 from inspect import getmembers, isclass, isfunction
228 import sphinx.ext.autosummary
229 from sphinx.ext.autosummary import Autosummary
230 from docutils.parsers.rst import directives
231 from docutils.statemachine import StringList
232
233
234 class BetterAutosummary(Autosummary):
235 """Autosummary with autolisting for modules.
236
237 By default it tries to import all public names (__all__),
238 otherwise import all classes and/or functions in a module.
239
240 Options:
241 - :autolist: option to get list of classes and functions from currentmodule.
242 - :autolist-classes: option to get list of classes from currentmodule.
243 - :autolist-functions: option to get list of functions from currentmodule.
244
245 Example Usage:
246
247 .. currentmodule:: ignite.metrics
248
249 .. autosummary::
250 :nosignatures:
251 :autolist:
252 """
253
254 # Add new option
255 _option_spec = Autosummary.option_spec.copy()
256 _option_spec.update(
257 {
258 "autolist": directives.unchanged,
259 "autolist-classes": directives.unchanged,
260 "autolist-functions": directives.unchanged,
261 }
262 )
263 option_spec = _option_spec
264
265 def run(self):
266 for auto in ("autolist", "autolist-classes", "autolist-functions"):
267 if auto in self.options:
268 # Get current module name
269 module_name = self.env.ref_context.get("py:module")
270 # Import module
271 module = import_module(module_name)
272
273 # Get public names (if possible)
274 try:
275 names = getattr(module, "__all__")
276 except AttributeError:
277 # Get classes defined in the module
278 cls_names = [
279 name[0]
280 for name in getmembers(module, isclass)
281 if name[-1].__module__ == module_name and not (name[0].startswith("_"))
282 ]
283 # Get functions defined in the module
284 fn_names = [
285 name[0]
286 for name in getmembers(module, isfunction)
287 if (name[-1].__module__ == module_name) and not (name[0].startswith("_"))
288 ]
289 names = cls_names + fn_names
290 # It may happen that module doesn't have any defined class or func
291 if not names:
292 names = [name[0] for name in getmembers(module)]
293
294 # Filter out members w/o doc strings
295 names = [name for name in names if getattr(module, name).__doc__ is not None]
296
297 if auto == "autolist":
298 # Get list of all classes and functions inside module
299 names = [
300 name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))
301 ]
302 else:
303 if auto == "autolist-classes":
304 # Get only classes
305 check = isclass
306 elif auto == "autolist-functions":
307 # Get only functions
308 check = isfunction
309 else:
310 raise NotImplementedError
311
312 names = [name for name in names if check(getattr(module, name))]
313
314 # Update content
315 self.content = StringList(names)
316 return super().run()
317
318
319 # Patch original Autosummary
320 sphinx.ext.autosummary.Autosummary = BetterAutosummary
321
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -32,8 +32,8 @@
version = os.environ["code_version"]
if "master" in version:
version = "master (" + ignite.__version__ + ")"
- else:
- version = version.replace("v", "")
+ # else:
+ # version = version.replace("v", "")
except KeyError:
version = ignite.__version__
@@ -129,7 +129,8 @@
"css_files": [
# 'https://fonts.googleapis.com/css?family=Lato',
# '_static/css/pytorch_theme.css'
- "_static/css/ignite_theme.css"
+ "_static/css/ignite_theme.css",
+ "https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css",
],
}
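Applied to `docs/source/conf.py`, the patched `html_context` block would read as follows (reconstructed directly from the hunk above):

```python
html_context = {
    "css_files": [
        # 'https://fonts.googleapis.com/css?family=Lato',
        # '_static/css/pytorch_theme.css'
        "_static/css/ignite_theme.css",
        "https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css",
    ],
}
```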
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -32,8 +32,8 @@\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n- else:\n- version = version.replace(\"v\", \"\")\n+ # else:\n+ # version = version.replace(\"v\", \"\")\n except KeyError:\n version = ignite.__version__\n \n@@ -129,7 +129,8 @@\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n- \"_static/css/ignite_theme.css\"\n+ \"_static/css/ignite_theme.css\",\n+ \"https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css\",\n ],\n }\n", "issue": "[search] use algolia docsearch\n## \ud83d\udcda Documentation\r\nWhile built-in search is enough, searching something and hit enter feels like not a good UX.\r\nTo be able to search interactively, we can apply [Algolia Docsearch](https://docsearch.algolia.com/) for that.\r\nPyTorch is also using it when searching from homepage.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\nimport ignite\nimport pytorch_sphinx_theme\n\nfrom datetime import datetime\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ignite\"\nauthor = \"PyTorch-Ignite Contributors\"\ncopyright = f\"{datetime.now().year}, {author}\"\n\n# The short X.Y version\ntry:\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n else:\n version = version.replace(\"v\", \"\")\nexcept KeyError:\n version = ignite.__version__\n\n# The full version, including alpha/beta/rc tags\nrelease = \"master\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinxcontrib.katex\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# katex options\nkatex_prerender = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://pytorch.org/ignite/index.html\",\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"navigation_with_keys\": True,\n}\n\nhtml_logo = \"_templates/_static/img/ignite_logo.svg\"\n\nhtml_favicon = \"_templates/_static/img/ignite_logomark.svg\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"_templates/_static\"]\n\nhtml_context = {\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n \"_static/css/ignite_theme.css\"\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ignitedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"ignite.tex\", \"ignite Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ignite\", \"ignite Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"ignite\",\n \"ignite Documentation\",\n author,\n \"ignite\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Type hints configs ------------------------------------------------------\n\nautodoc_inherit_docstrings = True\nautoclass_content = \"both\"\nautodoc_typehints = \"description\"\nnapoleon_attr_annotations = True\n\n# -- A patch that turns-off cross refs for type annotations ------------------\n\nimport sphinx.domains.python\nfrom docutils import nodes\nfrom sphinx import addnodes\n\n# replaces pending_xref node with desc_type for type annotations\nsphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type(\"\", nodes.Text(t))\n\n# -- Autosummary patch to get list of a classes, funcs automatically ----------\n\nfrom importlib import import_module\nfrom inspect import getmembers, isclass, isfunction\nimport sphinx.ext.autosummary\nfrom sphinx.ext.autosummary import Autosummary\nfrom docutils.parsers.rst import directives\nfrom docutils.statemachine import StringList\n\n\nclass BetterAutosummary(Autosummary):\n \"\"\"Autosummary with autolisting for modules.\n\n By default it tries to import all public names (__all__),\n otherwise import all classes and/or functions in a module.\n\n Options:\n - :autolist: option to get list of classes and functions from currentmodule.\n - :autolist-classes: option to get list of classes from currentmodule.\n - :autolist-functions: option to get list of functions from currentmodule.\n\n Example Usage:\n\n .. currentmodule:: ignite.metrics\n\n .. 
autosummary::\n :nosignatures:\n :autolist:\n \"\"\"\n\n # Add new option\n _option_spec = Autosummary.option_spec.copy()\n _option_spec.update(\n {\n \"autolist\": directives.unchanged,\n \"autolist-classes\": directives.unchanged,\n \"autolist-functions\": directives.unchanged,\n }\n )\n option_spec = _option_spec\n\n def run(self):\n for auto in (\"autolist\", \"autolist-classes\", \"autolist-functions\"):\n if auto in self.options:\n # Get current module name\n module_name = self.env.ref_context.get(\"py:module\")\n # Import module\n module = import_module(module_name)\n\n # Get public names (if possible)\n try:\n names = getattr(module, \"__all__\")\n except AttributeError:\n # Get classes defined in the module\n cls_names = [\n name[0]\n for name in getmembers(module, isclass)\n if name[-1].__module__ == module_name and not (name[0].startswith(\"_\"))\n ]\n # Get functions defined in the module\n fn_names = [\n name[0]\n for name in getmembers(module, isfunction)\n if (name[-1].__module__ == module_name) and not (name[0].startswith(\"_\"))\n ]\n names = cls_names + fn_names\n # It may happen that module doesn't have any defined class or func\n if not names:\n names = [name[0] for name in getmembers(module)]\n\n # Filter out members w/o doc strings\n names = [name for name in names if getattr(module, name).__doc__ is not None]\n\n if auto == \"autolist\":\n # Get list of all classes and functions inside module\n names = [\n name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))\n ]\n else:\n if auto == \"autolist-classes\":\n # Get only classes\n check = isclass\n elif auto == \"autolist-functions\":\n # Get only functions\n check = isfunction\n else:\n raise NotImplementedError\n\n names = [name for name in names if check(getattr(module, name))]\n\n # Update content\n self.content = StringList(names)\n return super().run()\n\n\n# Patch original Autosummary\nsphinx.ext.autosummary.Autosummary = BetterAutosummary\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\nimport ignite\nimport pytorch_sphinx_theme\n\nfrom datetime import datetime\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ignite\"\nauthor = \"PyTorch-Ignite Contributors\"\ncopyright = f\"{datetime.now().year}, {author}\"\n\n# The short X.Y version\ntry:\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n # else:\n # version = version.replace(\"v\", \"\")\nexcept KeyError:\n version = ignite.__version__\n\n# The full version, including alpha/beta/rc tags\nrelease = \"master\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinxcontrib.katex\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# katex options\nkatex_prerender = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://pytorch.org/ignite/index.html\",\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"navigation_with_keys\": True,\n}\n\nhtml_logo = \"_templates/_static/img/ignite_logo.svg\"\n\nhtml_favicon = \"_templates/_static/img/ignite_logomark.svg\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"_templates/_static\"]\n\nhtml_context = {\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n \"_static/css/ignite_theme.css\",\n \"https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css\",\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ignitedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"ignite.tex\", \"ignite Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ignite\", \"ignite Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"ignite\",\n \"ignite Documentation\",\n author,\n \"ignite\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Type hints configs ------------------------------------------------------\n\nautodoc_inherit_docstrings = True\nautoclass_content = \"both\"\nautodoc_typehints = \"description\"\nnapoleon_attr_annotations = True\n\n# -- A patch that turns-off cross refs for type annotations ------------------\n\nimport sphinx.domains.python\nfrom docutils import nodes\nfrom sphinx import addnodes\n\n# replaces pending_xref node with desc_type for type annotations\nsphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type(\"\", nodes.Text(t))\n\n# -- Autosummary patch to get list of a classes, funcs automatically ----------\n\nfrom importlib import import_module\nfrom inspect import getmembers, isclass, isfunction\nimport sphinx.ext.autosummary\nfrom sphinx.ext.autosummary import Autosummary\nfrom docutils.parsers.rst import directives\nfrom docutils.statemachine import StringList\n\n\nclass BetterAutosummary(Autosummary):\n \"\"\"Autosummary with autolisting for 
modules.\n\n By default it tries to import all public names (__all__),\n otherwise import all classes and/or functions in a module.\n\n Options:\n - :autolist: option to get list of classes and functions from currentmodule.\n - :autolist-classes: option to get list of classes from currentmodule.\n - :autolist-functions: option to get list of functions from currentmodule.\n\n Example Usage:\n\n .. currentmodule:: ignite.metrics\n\n .. autosummary::\n :nosignatures:\n :autolist:\n \"\"\"\n\n # Add new option\n _option_spec = Autosummary.option_spec.copy()\n _option_spec.update(\n {\n \"autolist\": directives.unchanged,\n \"autolist-classes\": directives.unchanged,\n \"autolist-functions\": directives.unchanged,\n }\n )\n option_spec = _option_spec\n\n def run(self):\n for auto in (\"autolist\", \"autolist-classes\", \"autolist-functions\"):\n if auto in self.options:\n # Get current module name\n module_name = self.env.ref_context.get(\"py:module\")\n # Import module\n module = import_module(module_name)\n\n # Get public names (if possible)\n try:\n names = getattr(module, \"__all__\")\n except AttributeError:\n # Get classes defined in the module\n cls_names = [\n name[0]\n for name in getmembers(module, isclass)\n if name[-1].__module__ == module_name and not (name[0].startswith(\"_\"))\n ]\n # Get functions defined in the module\n fn_names = [\n name[0]\n for name in getmembers(module, isfunction)\n if (name[-1].__module__ == module_name) and not (name[0].startswith(\"_\"))\n ]\n names = cls_names + fn_names\n # It may happen that module doesn't have any defined class or func\n if not names:\n names = [name[0] for name in getmembers(module)]\n\n # Filter out members w/o doc strings\n names = [name for name in names if getattr(module, name).__doc__ is not None]\n\n if auto == \"autolist\":\n # Get list of all classes and functions inside module\n names = [\n name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))\n ]\n else:\n if auto == \"autolist-classes\":\n # Get only classes\n check = isclass\n elif auto == \"autolist-functions\":\n # Get only functions\n check = isfunction\n else:\n raise NotImplementedError\n\n names = [name for name in names if check(getattr(module, name))]\n\n # Update content\n self.content = StringList(names)\n return super().run()\n\n\n# Patch original Autosummary\nsphinx.ext.autosummary.Autosummary = BetterAutosummary\n", "path": "docs/source/conf.py"}]} | 3,465 | 200 |
gh_patches_debug_33270 | rasdani/github-patches | git_diff | Flexget__Flexget-83 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't log in to myepisodes
When using the myepisodes plugin you get the warning "Login to myepisodes.com failed, please check your account data or see if the site is down."
It seems the site has changed so that you have to access it via www.myepisodes.com.
I made a copy of the plugin, changed all the URLs to www.myepisodes.com, and it worked.
--- END ISSUE ---
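A minimal sketch of the kind of change the reporter describes (illustrative only, not the shipped plugin code; keeping the host in one module-level constant makes a domain change a one-line edit):

```python
# Illustrative sketch only -- mirrors the plugin's urllib2 usage, not its real structure.
import urllib
import urllib2

BASE_URL = 'http://www.myepisodes.com'  # the www. host the site now requires


def login(opener, username, password):
    """Log in and return the response body, using the www. base URL."""
    request = urllib2.Request('%s/login.php?' % BASE_URL)
    params = urllib.urlencode({'username': username,
                               'password': password,
                               'action': 'Login'})
    return opener.open(request, params).read()
```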
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/services/myepisodes.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 import logging
3 import urllib
4 import urllib2
5 import re
6 import cookielib
7 from datetime import datetime
8
9 from sqlalchemy import Column, Integer, String, DateTime
10
11 from flexget import db_schema
12 from flexget.plugin import register_plugin, DependencyError, PluginWarning
13
14 try:
15 from flexget.plugins.api_tvdb import lookup_series
16 except ImportError:
17 raise DependencyError(issued_by='myepisodes', missing='api_tvdb',
18 message='myepisodes requires the `api_tvdb` plugin')
19
20
21 log = logging.getLogger('myepisodes')
22 Base = db_schema.versioned_base('myepisodes', 0)
23
24
25 class MyEpisodesInfo(Base):
26 __tablename__ = 'myepisodes'
27
28 id = Column(Integer, primary_key=True)
29 series_name = Column(String, unique=True)
30 myepisodes_id = Column(Integer, unique=True)
31 updated = Column(DateTime)
32
33 def __init__(self, series_name, myepisodes_id):
34 self.series_name = series_name
35 self.myepisodes_id = myepisodes_id
36 self.updated = datetime.now()
37
38 def __repr__(self):
39 return '<MyEpisodesInfo(series_name=%s, myepisodes_id=%s)>' % (self.series_name, self.myepisodes_id)
40
41
42 class MyEpisodes(object):
43 """
44 Marks a series episode as acquired in your myepisodes.com account.
45
46 Simple Example:
47
48 Most shows are recognized automatically from their TVDBname.
49 And of course the plugin needs to know your MyEpisodes.com account details.
50
51 tasks:
52 tvshows:
53 myepisodes:
54 username: <username>
55 password: <password>
56 series:
57 - human target
58 - chuck
59
60 Advanced Example:
61
62 In some cases, the TVDB name is either not unique or won't even be discovered.
63 In that case you need to specify the MyEpisodes id manually using the set plugin.
64
65 tasks:
66 tvshows:
67 myepisodes:
68 username: <username>
69 password: <password>
70 series:
71 - human target:
72 set:
73 myepisodes_id: 5111
74 - chuck
75
76 How to find the MyEpisodes id: http://matrixagents.org/screencasts/myep_example-20110507-131555.png
77 """
78
79 schema = {
80 'type': 'object',
81 'properties': {
82 'username': {'type': 'string'},
83 'password': {'type': 'string'}
84 },
85 'required': ['username', 'password'],
86 'additionalProperties': False
87 }
88
89 def on_task_exit(self, task, config):
90 """Mark all accepted episodes as acquired on MyEpisodes"""
91 if not task.accepted:
92 # Nothing accepted, don't do anything
93 return
94
95 username = config['username']
96 password = config['password']
97
98 cookiejar = cookielib.CookieJar()
99 opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
100 baseurl = urllib2.Request('http://myepisodes.com/login.php?')
101 loginparams = urllib.urlencode({'username': username,
102 'password': password,
103 'action': 'Login'})
104 try:
105 logincon = opener.open(baseurl, loginparams)
106 loginsrc = logincon.read()
107 except urllib2.URLError as e:
108 log.error('Error logging in to myepisodes: %s' % e)
109 return
110
111 if str(username) not in loginsrc:
112 raise PluginWarning(('Login to myepisodes.com failed, please check '
113 'your account data or see if the site is down.'), log)
114
115 for entry in task.accepted:
116 try:
117 self.mark_episode(task, entry, opener)
118 except PluginWarning as w:
119 log.warning(str(w))
120
121 def lookup_myepisodes_id(self, entry, opener, session):
122 """Populates myepisodes_id field for an entry, and returns the id.
123
124 Call will also set entry field `myepisode_id` if successful.
125
126 Return:
127 myepisode id
128
129 Raises:
130 LookupError if entry does not have field series_name
131 """
132
133 # Don't need to look it up if we already have it.
134 if entry.get('myepisodes_id'):
135 return entry['myepisodes_id']
136
137 if not entry.get('series_name'):
138 raise LookupError('Cannot lookup myepisodes id for entries without series_name')
139 series_name = entry['series_name']
140
141 # First check if we already have a myepisodes id stored for this series
142 myepisodes_info = session.query(MyEpisodesInfo).\
143 filter(MyEpisodesInfo.series_name == series_name.lower()).first()
144 if myepisodes_info:
145 entry['myepisodes_id'] = myepisodes_info.myepisodes_id
146 return myepisodes_info.myepisodes_id
147
148 # Get the series name from thetvdb to increase match chance on myepisodes
149 if entry.get('tvdb_series_name'):
150 query_name = entry['tvdb_series_name']
151 else:
152 try:
153 series = lookup_series(name=series_name, tvdb_id=entry.get('tvdb_id'))
154 query_name = series.seriesname
155 except LookupError as e:
156 log.warning('Unable to lookup series `%s` from tvdb, using raw name.' % series_name)
157 query_name = series_name
158
159 baseurl = urllib2.Request('http://myepisodes.com/search.php?')
160 params = urllib.urlencode({'tvshow': query_name, 'action': 'Search myepisodes.com'})
161 try:
162 con = opener.open(baseurl, params)
163 txt = con.read()
164 except urllib2.URLError as e:
165 log.error('Error searching for myepisodes id: %s' % e)
166
167 matchObj = re.search(r'&showid=([0-9]*)">' + query_name + '</a>', txt, re.MULTILINE | re.IGNORECASE)
168 if matchObj:
169 myepisodes_id = matchObj.group(1)
170 db_item = session.query(MyEpisodesInfo).filter(MyEpisodesInfo.myepisodes_id == myepisodes_id).first()
171 if db_item:
172 log.info('Changing name to `%s` for series with myepisodes_id %s' %
173 (series_name.lower(), myepisodes_id))
174 db_item.series_name = series_name.lower()
175 else:
176 session.add(MyEpisodesInfo(series_name.lower(), myepisodes_id))
177 entry['myepisodes_id'] = myepisodes_id
178 return myepisodes_id
179
180 def mark_episode(self, task, entry, opener):
181 """Mark episode as acquired.
182
183 Required entry fields:
184 - series_name
185 - series_season
186 - series_episode
187
188 Raises:
189 PluginWarning if operation fails
190 """
191
192 if 'series_season' not in entry or 'series_episode' not in entry or 'series_name' not in entry:
193 raise PluginWarning(
194 'Can\'t mark entry `%s` in myepisodes without series_season, series_episode and series_name fields' %
195 entry['title'], log)
196
197 if not self.lookup_myepisodes_id(entry, opener, session=task.session):
198 raise PluginWarning('Couldn\'t get myepisodes id for `%s`' % entry['title'], log)
199
200 myepisodes_id = entry['myepisodes_id']
201 season = entry['series_season']
202 episode = entry['series_episode']
203
204 if task.manager.options.test:
205 log.info('Would mark %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))
206 else:
207 baseurl2 = urllib2.Request(
208 'http://myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %
209 (myepisodes_id, season, episode))
210 opener.open(baseurl2)
211 log.info('Marked %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))
212
213
214 register_plugin(MyEpisodes, 'myepisodes', api_ver=2)
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/services/myepisodes.py b/flexget/plugins/services/myepisodes.py
--- a/flexget/plugins/services/myepisodes.py
+++ b/flexget/plugins/services/myepisodes.py
@@ -97,7 +97,7 @@
cookiejar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
- baseurl = urllib2.Request('http://myepisodes.com/login.php?')
+ baseurl = urllib2.Request('http://www.myepisodes.com/login.php?')
loginparams = urllib.urlencode({'username': username,
'password': password,
'action': 'Login'})
@@ -156,7 +156,7 @@
log.warning('Unable to lookup series `%s` from tvdb, using raw name.' % series_name)
query_name = series_name
- baseurl = urllib2.Request('http://myepisodes.com/search.php?')
+ baseurl = urllib2.Request('http://www.myepisodes.com/search.php?')
params = urllib.urlencode({'tvshow': query_name, 'action': 'Search myepisodes.com'})
try:
con = opener.open(baseurl, params)
@@ -205,7 +205,7 @@
log.info('Would mark %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))
else:
baseurl2 = urllib2.Request(
- 'http://myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %
+ 'http://www.myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %
(myepisodes_id, season, episode))
opener.open(baseurl2)
log.info('Marked %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))
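A side note on the patch above (a hedged sketch of a design alternative, not part of the applied fix): the host name is hard-coded in three separate places, so the change has to touch each of them. Centralizing the host in one module-level constant would make any future host change a one-line edit:

```python
# Hypothetical refactor, not the applied patch: derive the three endpoints
# above from a single constant instead of repeating the host.
MYEPISODES_BASE = 'http://www.myepisodes.com'

LOGIN_URL = MYEPISODES_BASE + '/login.php?'
SEARCH_URL = MYEPISODES_BASE + '/search.php?'
UPDATE_URL = (MYEPISODES_BASE +
              '/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0')
```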
| {"golden_diff": "diff --git a/flexget/plugins/services/myepisodes.py b/flexget/plugins/services/myepisodes.py\n--- a/flexget/plugins/services/myepisodes.py\n+++ b/flexget/plugins/services/myepisodes.py\n@@ -97,7 +97,7 @@\n \n cookiejar = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))\n- baseurl = urllib2.Request('http://myepisodes.com/login.php?')\n+ baseurl = urllib2.Request('http://www.myepisodes.com/login.php?')\n loginparams = urllib.urlencode({'username': username,\n 'password': password,\n 'action': 'Login'})\n@@ -156,7 +156,7 @@\n log.warning('Unable to lookup series `%s` from tvdb, using raw name.' % series_name)\n query_name = series_name\n \n- baseurl = urllib2.Request('http://myepisodes.com/search.php?')\n+ baseurl = urllib2.Request('http://www.myepisodes.com/search.php?')\n params = urllib.urlencode({'tvshow': query_name, 'action': 'Search myepisodes.com'})\n try:\n con = opener.open(baseurl, params)\n@@ -205,7 +205,7 @@\n log.info('Would mark %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))\n else:\n baseurl2 = urllib2.Request(\n- 'http://myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %\n+ 'http://www.myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %\n (myepisodes_id, season, episode))\n opener.open(baseurl2)\n log.info('Marked %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))\n", "issue": "Can't log in into myepisodes\nWhen usin the myepisodes plugin you get the warning \"Login to myepisodes.com failed, please check your account data or see if the site is down.\"\nIt seems that they have changed so you have to access their site with www.myepisodes.com. \n\nI made a copy of the plugin and changed all the urls to www.myepisodes.com and it worked\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport logging\nimport urllib\nimport urllib2\nimport re\nimport cookielib\nfrom datetime import datetime\n\nfrom sqlalchemy import Column, Integer, String, DateTime\n\nfrom flexget import db_schema\nfrom flexget.plugin import register_plugin, DependencyError, PluginWarning\n\ntry:\n from flexget.plugins.api_tvdb import lookup_series\nexcept ImportError:\n raise DependencyError(issued_by='myepisodes', missing='api_tvdb',\n message='myepisodes requires the `api_tvdb` plugin')\n\n\nlog = logging.getLogger('myepisodes')\nBase = db_schema.versioned_base('myepisodes', 0)\n\n\nclass MyEpisodesInfo(Base):\n __tablename__ = 'myepisodes'\n\n id = Column(Integer, primary_key=True)\n series_name = Column(String, unique=True)\n myepisodes_id = Column(Integer, unique=True)\n updated = Column(DateTime)\n\n def __init__(self, series_name, myepisodes_id):\n self.series_name = series_name\n self.myepisodes_id = myepisodes_id\n self.updated = datetime.now()\n\n def __repr__(self):\n return '<MyEpisodesInfo(series_name=%s, myepisodes_id=%s)>' % (self.series_name, self.myepisodes_id)\n\n\nclass MyEpisodes(object):\n \"\"\"\n Marks a series episode as acquired in your myepisodes.com account.\n\n Simple Example:\n\n Most shows are recognized automatically from their TVDBname.\n And of course the plugin needs to know your MyEpisodes.com account details.\n\n tasks:\n tvshows:\n myepisodes:\n username: <username>\n password: <password>\n series:\n - human target\n - chuck\n\n Advanced Example:\n\n In some cases, the TVDB name is either not unique or won't even be discovered.\n In that case you need to 
specify the MyEpisodes id manually using the set plugin.\n\n tasks:\n tvshows:\n myepisodes:\n username: <username>\n password: <password>\n series:\n - human target:\n set:\n myepisodes_id: 5111\n - chuck\n\n How to find the MyEpisodes id: http://matrixagents.org/screencasts/myep_example-20110507-131555.png\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'}\n },\n 'required': ['username', 'password'],\n 'additionalProperties': False\n }\n\n def on_task_exit(self, task, config):\n \"\"\"Mark all accepted episodes as acquired on MyEpisodes\"\"\"\n if not task.accepted:\n # Nothing accepted, don't do anything\n return\n\n username = config['username']\n password = config['password']\n\n cookiejar = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))\n baseurl = urllib2.Request('http://myepisodes.com/login.php?')\n loginparams = urllib.urlencode({'username': username,\n 'password': password,\n 'action': 'Login'})\n try:\n logincon = opener.open(baseurl, loginparams)\n loginsrc = logincon.read()\n except urllib2.URLError as e:\n log.error('Error logging in to myepisodes: %s' % e)\n return\n\n if str(username) not in loginsrc:\n raise PluginWarning(('Login to myepisodes.com failed, please check '\n 'your account data or see if the site is down.'), log)\n\n for entry in task.accepted:\n try:\n self.mark_episode(task, entry, opener)\n except PluginWarning as w:\n log.warning(str(w))\n\n def lookup_myepisodes_id(self, entry, opener, session):\n \"\"\"Populates myepisodes_id field for an entry, and returns the id.\n\n Call will also set entry field `myepisode_id` if successful.\n\n Return:\n myepisode id\n\n Raises:\n LookupError if entry does not have field series_name\n \"\"\"\n\n # Don't need to look it up if we already have it.\n if entry.get('myepisodes_id'):\n return entry['myepisodes_id']\n\n if not entry.get('series_name'):\n raise LookupError('Cannot lookup myepisodes id for entries without series_name')\n series_name = entry['series_name']\n\n # First check if we already have a myepisodes id stored for this series\n myepisodes_info = session.query(MyEpisodesInfo).\\\n filter(MyEpisodesInfo.series_name == series_name.lower()).first()\n if myepisodes_info:\n entry['myepisodes_id'] = myepisodes_info.myepisodes_id\n return myepisodes_info.myepisodes_id\n\n # Get the series name from thetvdb to increase match chance on myepisodes\n if entry.get('tvdb_series_name'):\n query_name = entry['tvdb_series_name']\n else:\n try:\n series = lookup_series(name=series_name, tvdb_id=entry.get('tvdb_id'))\n query_name = series.seriesname\n except LookupError as e:\n log.warning('Unable to lookup series `%s` from tvdb, using raw name.' 
% series_name)\n query_name = series_name\n\n baseurl = urllib2.Request('http://myepisodes.com/search.php?')\n params = urllib.urlencode({'tvshow': query_name, 'action': 'Search myepisodes.com'})\n try:\n con = opener.open(baseurl, params)\n txt = con.read()\n except urllib2.URLError as e:\n log.error('Error searching for myepisodes id: %s' % e)\n\n matchObj = re.search(r'&showid=([0-9]*)\">' + query_name + '</a>', txt, re.MULTILINE | re.IGNORECASE)\n if matchObj:\n myepisodes_id = matchObj.group(1)\n db_item = session.query(MyEpisodesInfo).filter(MyEpisodesInfo.myepisodes_id == myepisodes_id).first()\n if db_item:\n log.info('Changing name to `%s` for series with myepisodes_id %s' %\n (series_name.lower(), myepisodes_id))\n db_item.series_name = series_name.lower()\n else:\n session.add(MyEpisodesInfo(series_name.lower(), myepisodes_id))\n entry['myepisodes_id'] = myepisodes_id\n return myepisodes_id\n\n def mark_episode(self, task, entry, opener):\n \"\"\"Mark episode as acquired.\n\n Required entry fields:\n - series_name\n - series_season\n - series_episode\n\n Raises:\n PluginWarning if operation fails\n \"\"\"\n\n if 'series_season' not in entry or 'series_episode' not in entry or 'series_name' not in entry:\n raise PluginWarning(\n 'Can\\'t mark entry `%s` in myepisodes without series_season, series_episode and series_name fields' %\n entry['title'], log)\n\n if not self.lookup_myepisodes_id(entry, opener, session=task.session):\n raise PluginWarning('Couldn\\'t get myepisodes id for `%s`' % entry['title'], log)\n\n myepisodes_id = entry['myepisodes_id']\n season = entry['series_season']\n episode = entry['series_episode']\n\n if task.manager.options.test:\n log.info('Would mark %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))\n else:\n baseurl2 = urllib2.Request(\n 'http://myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %\n (myepisodes_id, season, episode))\n opener.open(baseurl2)\n log.info('Marked %s of `%s` as acquired.' 
% (entry['series_id'], entry['series_name']))\n\n\nregister_plugin(MyEpisodes, 'myepisodes', api_ver=2)\n", "path": "flexget/plugins/services/myepisodes.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport logging\nimport urllib\nimport urllib2\nimport re\nimport cookielib\nfrom datetime import datetime\n\nfrom sqlalchemy import Column, Integer, String, DateTime\n\nfrom flexget import db_schema\nfrom flexget.plugin import register_plugin, DependencyError, PluginWarning\n\ntry:\n from flexget.plugins.api_tvdb import lookup_series\nexcept ImportError:\n raise DependencyError(issued_by='myepisodes', missing='api_tvdb',\n message='myepisodes requires the `api_tvdb` plugin')\n\n\nlog = logging.getLogger('myepisodes')\nBase = db_schema.versioned_base('myepisodes', 0)\n\n\nclass MyEpisodesInfo(Base):\n __tablename__ = 'myepisodes'\n\n id = Column(Integer, primary_key=True)\n series_name = Column(String, unique=True)\n myepisodes_id = Column(Integer, unique=True)\n updated = Column(DateTime)\n\n def __init__(self, series_name, myepisodes_id):\n self.series_name = series_name\n self.myepisodes_id = myepisodes_id\n self.updated = datetime.now()\n\n def __repr__(self):\n return '<MyEpisodesInfo(series_name=%s, myepisodes_id=%s)>' % (self.series_name, self.myepisodes_id)\n\n\nclass MyEpisodes(object):\n \"\"\"\n Marks a series episode as acquired in your myepisodes.com account.\n\n Simple Example:\n\n Most shows are recognized automatically from their TVDBname.\n And of course the plugin needs to know your MyEpisodes.com account details.\n\n tasks:\n tvshows:\n myepisodes:\n username: <username>\n password: <password>\n series:\n - human target\n - chuck\n\n Advanced Example:\n\n In some cases, the TVDB name is either not unique or won't even be discovered.\n In that case you need to specify the MyEpisodes id manually using the set plugin.\n\n tasks:\n tvshows:\n myepisodes:\n username: <username>\n password: <password>\n series:\n - human target:\n set:\n myepisodes_id: 5111\n - chuck\n\n How to find the MyEpisodes id: http://matrixagents.org/screencasts/myep_example-20110507-131555.png\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'}\n },\n 'required': ['username', 'password'],\n 'additionalProperties': False\n }\n\n def on_task_exit(self, task, config):\n \"\"\"Mark all accepted episodes as acquired on MyEpisodes\"\"\"\n if not task.accepted:\n # Nothing accepted, don't do anything\n return\n\n username = config['username']\n password = config['password']\n\n cookiejar = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))\n baseurl = urllib2.Request('http://www.myepisodes.com/login.php?')\n loginparams = urllib.urlencode({'username': username,\n 'password': password,\n 'action': 'Login'})\n try:\n logincon = opener.open(baseurl, loginparams)\n loginsrc = logincon.read()\n except urllib2.URLError as e:\n log.error('Error logging in to myepisodes: %s' % e)\n return\n\n if str(username) not in loginsrc:\n raise PluginWarning(('Login to myepisodes.com failed, please check '\n 'your account data or see if the site is down.'), log)\n\n for entry in task.accepted:\n try:\n self.mark_episode(task, entry, opener)\n except PluginWarning as w:\n log.warning(str(w))\n\n def lookup_myepisodes_id(self, entry, opener, session):\n \"\"\"Populates myepisodes_id field for an entry, and returns the id.\n\n Call will also set entry field 
`myepisode_id` if successful.\n\n Return:\n myepisode id\n\n Raises:\n LookupError if entry does not have field series_name\n \"\"\"\n\n # Don't need to look it up if we already have it.\n if entry.get('myepisodes_id'):\n return entry['myepisodes_id']\n\n if not entry.get('series_name'):\n raise LookupError('Cannot lookup myepisodes id for entries without series_name')\n series_name = entry['series_name']\n\n # First check if we already have a myepisodes id stored for this series\n myepisodes_info = session.query(MyEpisodesInfo).\\\n filter(MyEpisodesInfo.series_name == series_name.lower()).first()\n if myepisodes_info:\n entry['myepisodes_id'] = myepisodes_info.myepisodes_id\n return myepisodes_info.myepisodes_id\n\n # Get the series name from thetvdb to increase match chance on myepisodes\n if entry.get('tvdb_series_name'):\n query_name = entry['tvdb_series_name']\n else:\n try:\n series = lookup_series(name=series_name, tvdb_id=entry.get('tvdb_id'))\n query_name = series.seriesname\n except LookupError as e:\n log.warning('Unable to lookup series `%s` from tvdb, using raw name.' % series_name)\n query_name = series_name\n\n baseurl = urllib2.Request('http://www.myepisodes.com/search.php?')\n params = urllib.urlencode({'tvshow': query_name, 'action': 'Search myepisodes.com'})\n try:\n con = opener.open(baseurl, params)\n txt = con.read()\n except urllib2.URLError as e:\n log.error('Error searching for myepisodes id: %s' % e)\n\n matchObj = re.search(r'&showid=([0-9]*)\">' + query_name + '</a>', txt, re.MULTILINE | re.IGNORECASE)\n if matchObj:\n myepisodes_id = matchObj.group(1)\n db_item = session.query(MyEpisodesInfo).filter(MyEpisodesInfo.myepisodes_id == myepisodes_id).first()\n if db_item:\n log.info('Changing name to `%s` for series with myepisodes_id %s' %\n (series_name.lower(), myepisodes_id))\n db_item.series_name = series_name.lower()\n else:\n session.add(MyEpisodesInfo(series_name.lower(), myepisodes_id))\n entry['myepisodes_id'] = myepisodes_id\n return myepisodes_id\n\n def mark_episode(self, task, entry, opener):\n \"\"\"Mark episode as acquired.\n\n Required entry fields:\n - series_name\n - series_season\n - series_episode\n\n Raises:\n PluginWarning if operation fails\n \"\"\"\n\n if 'series_season' not in entry or 'series_episode' not in entry or 'series_name' not in entry:\n raise PluginWarning(\n 'Can\\'t mark entry `%s` in myepisodes without series_season, series_episode and series_name fields' %\n entry['title'], log)\n\n if not self.lookup_myepisodes_id(entry, opener, session=task.session):\n raise PluginWarning('Couldn\\'t get myepisodes id for `%s`' % entry['title'], log)\n\n myepisodes_id = entry['myepisodes_id']\n season = entry['series_season']\n episode = entry['series_episode']\n\n if task.manager.options.test:\n log.info('Would mark %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))\n else:\n baseurl2 = urllib2.Request(\n 'http://www.myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %\n (myepisodes_id, season, episode))\n opener.open(baseurl2)\n log.info('Marked %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))\n\n\nregister_plugin(MyEpisodes, 'myepisodes', api_ver=2)\n", "path": "flexget/plugins/services/myepisodes.py"}]} | 2,669 | 438 |
gh_patches_debug_17349 | rasdani/github-patches | git_diff | conan-io__conan-center-index-19060 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[request] fast-cdr/1.1.0
### Package Name/Version
fast-cdr/1.1.0
### Changelog
https://github.com/eProsima/Fast-CDR/releases/tag/v1.1.0
### Context about the new update
The Conan Center Bot detects the updatable recipe in #3470.
Open branch [qchateau/conan-center-index/ccb-fast-cdr-1.1.0](https://github.com/qchateau/conan-center-index/tree/ccb-fast-cdr-1.1.0)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/fast-cdr/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.build import check_min_cppstd
4 from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
5 from conan.tools.files import collect_libs, copy, get, rm, rmdir, save
6 from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
7 import os
8 import textwrap
9
10 required_conan_version = ">=1.54.0"
11
12
13 class FastCDRConan(ConanFile):
14 name = "fast-cdr"
15 license = "Apache-2.0"
16 homepage = "https://github.com/eProsima/Fast-CDR"
17 url = "https://github.com/conan-io/conan-center-index"
18 description = "eProsima FastCDR library for serialization"
19 topics = ("dds", "middleware", "serialization")
20
21 package_type = "library"
22 settings = "os", "arch", "compiler", "build_type"
23 options = {
24 "shared": [True, False],
25 "fPIC": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 }
31
32 def config_options(self):
33 if self.settings.os == "Windows":
34 del self.options.fPIC
35
36 def configure(self):
37 if self.options.shared:
38 self.options.rm_safe("fPIC")
39
40 def layout(self):
41 cmake_layout(self, src_folder="src")
42
43 def validate(self):
44 if self.settings.compiler.get_safe("cppstd"):
45 check_min_cppstd(self, 11)
46 if self.options.shared and is_msvc(self) and is_msvc_static_runtime(self):
47 # This combination leads to an fast-cdr error when linking
48 # linking dynamic '*.dll' and static MT runtime
49 # see https://github.com/eProsima/Fast-CDR/blob/v1.0.21/include/fastcdr/eProsima_auto_link.h#L37
50 # (2021-05-31)
51 raise ConanInvalidConfiguration("Mixing a dll eprosima library with a static runtime is a bad idea")
52
53 def source(self):
54 get(self, **self.conan_data["sources"][self.version], strip_root=True)
55
56 def generate(self):
57 tc = CMakeToolchain(self)
58 tc.variables["BUILD_STATIC"] = not self.options.shared
59 tc.generate()
60
61 def build(self):
62 cmake = CMake(self)
63 cmake.configure()
64 cmake.build()
65
66 def package(self):
67 copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
68 cmake = CMake(self)
69 cmake.install()
70 rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
71 rmdir(self, os.path.join(self.package_folder, "share"))
72 rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
73 rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
74
75 # TODO: to remove in conan v2 once cmake_find_package_* generators removed
76 self._create_cmake_module_alias_targets(
77 os.path.join(self.package_folder, self._module_file_rel_path),
78 {"fastcdr": "fastcdr::fastcdr"}
79 )
80
81 def _create_cmake_module_alias_targets(self, module_file, targets):
82 content = ""
83 for alias, aliased in targets.items():
84 content += textwrap.dedent(f"""\
85 if(TARGET {aliased} AND NOT TARGET {alias})
86 add_library({alias} INTERFACE IMPORTED)
87 set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
88 endif()
89 """)
90 save(self, module_file, content)
91
92 @property
93 def _module_file_rel_path(self):
94 return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake")
95
96 def package_info(self):
97 self.cpp_info.set_property("cmake_file_name", "fastcdr")
98 self.cpp_info.set_property("cmake_target_name", "fastcdr")
99 self.cpp_info.libs = collect_libs(self)
100 if self.settings.os == "Windows" and self.options.shared:
101 self.cpp_info.defines.append("FASTCDR_DYN_LINK")
102
103 # TODO: to remove in conan v2 once cmake_find_package_* generators removed
104 self.cpp_info.names["cmake_find_package"] = "fastcdr"
105 self.cpp_info.names["cmake_find_package_multi"] = "fastcdr"
106 self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
107 self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/fast-cdr/all/conanfile.py b/recipes/fast-cdr/all/conanfile.py
--- a/recipes/fast-cdr/all/conanfile.py
+++ b/recipes/fast-cdr/all/conanfile.py
@@ -4,6 +4,7 @@
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import collect_libs, copy, get, rm, rmdir, save
from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
+from conan.tools.scm import Version
import os
import textwrap
@@ -40,6 +41,10 @@
def layout(self):
cmake_layout(self, src_folder="src")
+ def build_requirements(self):
+ if Version(self.version) >= "1.1.0":
+ self.tool_requires("cmake/[>=3.16.3 <4]")
+
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, 11)
| {"golden_diff": "diff --git a/recipes/fast-cdr/all/conanfile.py b/recipes/fast-cdr/all/conanfile.py\n--- a/recipes/fast-cdr/all/conanfile.py\n+++ b/recipes/fast-cdr/all/conanfile.py\n@@ -4,6 +4,7 @@\n from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\n from conan.tools.files import collect_libs, copy, get, rm, rmdir, save\n from conan.tools.microsoft import is_msvc, is_msvc_static_runtime\n+from conan.tools.scm import Version\n import os\n import textwrap\n \n@@ -40,6 +41,10 @@\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n \n+ def build_requirements(self):\n+ if Version(self.version) >= \"1.1.0\":\n+ self.tool_requires(\"cmake/[>=3.16.3 <4]\")\n+\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n", "issue": "[request] fast-cdr/1.1.0\n### Package Name/Version\n\nfast-cdr/1.1.0\n\n### Changelog\n\nhttps://github.com/eProsima/Fast-CDR/releases/tag/v1.1.0\n\n### Context about the new update\n\nThe Conan Center Bot detects the updatable recipe in #3470.\r\nOpen branch [qchateau/conan-center-index/ccb-fast-cdr-1.1.0](https://github.com/qchateau/conan-center-index/tree/ccb-fast-cdr-1.1.0)\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import collect_libs, copy, get, rm, rmdir, save\nfrom conan.tools.microsoft import is_msvc, is_msvc_static_runtime\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.54.0\"\n\n\nclass FastCDRConan(ConanFile):\n name = \"fast-cdr\"\n license = \"Apache-2.0\"\n homepage = \"https://github.com/eProsima/Fast-CDR\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"eProsima FastCDR library for serialization\"\n topics = (\"dds\", \"middleware\", \"serialization\")\n\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n if self.options.shared and is_msvc(self) and is_msvc_static_runtime(self):\n # This combination leads to an fast-cdr error when linking\n # linking dynamic '*.dll' and static MT runtime\n # see https://github.com/eProsima/Fast-CDR/blob/v1.0.21/include/fastcdr/eProsima_auto_link.h#L37\n # (2021-05-31)\n raise ConanInvalidConfiguration(\"Mixing a dll eprosima library with a static runtime is a bad idea\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_STATIC\"] = not self.options.shared\n tc.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n rm(self, \"*.pdb\", 
os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"fastcdr\": \"fastcdr::fastcdr\"}\n )\n\n def _create_cmake_module_alias_targets(self, module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(f\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\")\n save(self, module_file, content)\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\"lib\", \"cmake\", f\"conan-official-{self.name}-targets.cmake\")\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"fastcdr\")\n self.cpp_info.set_property(\"cmake_target_name\", \"fastcdr\")\n self.cpp_info.libs = collect_libs(self)\n if self.settings.os == \"Windows\" and self.options.shared:\n self.cpp_info.defines.append(\"FASTCDR_DYN_LINK\")\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self.cpp_info.names[\"cmake_find_package\"] = \"fastcdr\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"fastcdr\"\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n", "path": "recipes/fast-cdr/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import collect_libs, copy, get, rm, rmdir, save\nfrom conan.tools.microsoft import is_msvc, is_msvc_static_runtime\nfrom conan.tools.scm import Version\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.54.0\"\n\n\nclass FastCDRConan(ConanFile):\n name = \"fast-cdr\"\n license = \"Apache-2.0\"\n homepage = \"https://github.com/eProsima/Fast-CDR\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"eProsima FastCDR library for serialization\"\n topics = (\"dds\", \"middleware\", \"serialization\")\n\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def build_requirements(self):\n if Version(self.version) >= \"1.1.0\":\n self.tool_requires(\"cmake/[>=3.16.3 <4]\")\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n if self.options.shared and is_msvc(self) and is_msvc_static_runtime(self):\n # This combination leads to an fast-cdr error when linking\n # linking dynamic '*.dll' and static MT runtime\n # see https://github.com/eProsima/Fast-CDR/blob/v1.0.21/include/fastcdr/eProsima_auto_link.h#L37\n # (2021-05-31)\n raise ConanInvalidConfiguration(\"Mixing a dll eprosima library with a static runtime is a bad idea\")\n\n def source(self):\n get(self, 
**self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_STATIC\"] = not self.options.shared\n tc.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"fastcdr\": \"fastcdr::fastcdr\"}\n )\n\n def _create_cmake_module_alias_targets(self, module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(f\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\")\n save(self, module_file, content)\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\"lib\", \"cmake\", f\"conan-official-{self.name}-targets.cmake\")\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"fastcdr\")\n self.cpp_info.set_property(\"cmake_target_name\", \"fastcdr\")\n self.cpp_info.libs = collect_libs(self)\n if self.settings.os == \"Windows\" and self.options.shared:\n self.cpp_info.defines.append(\"FASTCDR_DYN_LINK\")\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self.cpp_info.names[\"cmake_find_package\"] = \"fastcdr\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"fastcdr\"\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n", "path": "recipes/fast-cdr/all/conanfile.py"}]} | 1,632 | 232 |
gh_patches_debug_1457 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-539 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use list markup for lists of elements
Part of the BITV-Test: "1.3.1b HTML-Strukturelemente für Listen" (HTML structural elements for lists)
----
- [x] list of blueprints
- [x] list of projects
unsure:
- [ ] list of questions in poll contents
- [ ] list of choices in poll contents
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/contrib/templatetags/contrib_tags.py`
Content:
```
1 from django import template
2 from django.template.loader import render_to_string
3
4 register = template.Library()
5
6
7 @register.assignment_tag
8 def include_template_string(template, **kwargs):
9 rendered_template = render_to_string(template, kwargs)
10 return str(rendered_template)
11
12
13 @register.assignment_tag
14 def combined_url_parameter(request_query_dict, **kwargs):
15 combined_query_dict = request_query_dict.copy()
16 for key in kwargs:
17 combined_query_dict.setlist(key, [kwargs[key]])
18 encoded_parameter = '?' + combined_query_dict.urlencode()
19 return encoded_parameter
20
21
22 @register.assignment_tag
23 def filter_has_perm(perm, user, objects):
24 """Filter a list of objects based on user permissions."""
25 if not hasattr(user, 'has_perm'):
26 # If the swapped user model does not support permissions, all objects
27 # will be returned. This is taken from rules.templatetags.has_perm.
28 return objects
29 else:
30 return (obj for obj in objects if user.has_perm(perm, obj))
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py
--- a/apps/contrib/templatetags/contrib_tags.py
+++ b/apps/contrib/templatetags/contrib_tags.py
@@ -27,4 +27,4 @@
# will be returned. This is taken from rules.templatetags.has_perm.
return objects
else:
- return (obj for obj in objects if user.has_perm(perm, obj))
+ return [obj for obj in objects if user.has_perm(perm, obj)]
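A note on why this small-looking change matters (my reading; the issue does not spell it out): a generator expression can be consumed only once, while list markup in the templates needs to both test and iterate the filtered objects, which a real list supports and an exhausted generator does not. A minimal illustration:

```python
# Generators are exhausted after a single pass; lists can be reused.
gen = (obj for obj in [1, 2, 3] if obj > 1)
print(list(gen))   # [2, 3]
print(list(gen))   # []  -- already exhausted

lst = [obj for obj in [1, 2, 3] if obj > 1]
print(lst)         # [2, 3]
print(lst)         # [2, 3] -- still available on reuse
```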
| {"golden_diff": "diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py\n--- a/apps/contrib/templatetags/contrib_tags.py\n+++ b/apps/contrib/templatetags/contrib_tags.py\n@@ -27,4 +27,4 @@\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n- return (obj for obj in objects if user.has_perm(perm, obj))\n+ return [obj for obj in objects if user.has_perm(perm, obj)]\n", "issue": "use list markup for lists of elements\nPart of the BITV-Test: \"1.3.1b HTML-Strukturelemente f\u00fcr Listen\"\r\n----\r\n- [x] list of blueprints\r\n- [x] list of projects\r\n\r\nunsure:\r\n- [ ] list of questions in poll contents\r\n- [ ] list of choices in poll contents \n", "before_files": [{"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return (obj for obj in objects if user.has_perm(perm, obj))\n", "path": "apps/contrib/templatetags/contrib_tags.py"}], "after_files": [{"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return [obj for obj in objects if user.has_perm(perm, obj)]\n", "path": "apps/contrib/templatetags/contrib_tags.py"}]} | 609 | 134 |
gh_patches_debug_11284 | rasdani/github-patches | git_diff | encode__starlette-867 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Invalid cookie name leads to exception
When handling a request with an invalid cookie name (one that does not conform to RFC 2109), Starlette raises an exception, e.g. for a cookie named `iam/cookiename`.
This is because Starlette uses Python's stdlib cookie library, which is very strict.
I do understand the strictness, but in real-life scenarios you do receive such malformed cookies, and I want to handle those requests.
My suggestion for a solution would be to catch those exceptions and ignore the invalid cookie.
** EDIT **
I just realized the stdlib parser is applied to the whole cookie header, hence we can't ignore only one cookie.
I'll create a PR that ignores the whole cookie header in such a case, but maybe we should create our own Cookie/Morsel class and override the methods to ignore such errors on the inbound path?
--- END ISSUE ---
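As a rough sketch of the suggestion in the issue (not the project's actual patch; the helper name is made up for illustration), wrapping the stdlib parse in a try/except for `http.cookies.CookieError` lets the request carry on instead of failing:

```python
import http.cookies
import typing


def parse_cookie_header(cookie_header: str) -> typing.Dict[str, str]:
    # Hypothetical helper: mirror the stdlib-based parsing, but swallow the
    # CookieError that SimpleCookie.load raises for malformed cookie names.
    cookies: typing.Dict[str, str] = {}
    cookie: http.cookies.BaseCookie = http.cookies.SimpleCookie()
    try:
        cookie.load(cookie_header)
    except http.cookies.CookieError:
        pass  # e.g. a name like "iam/cookiename" that violates RFC 2109
    for key, morsel in cookie.items():
        cookies[key] = morsel.value
    return cookies
```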
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/requests.py`
Content:
```
1 import asyncio
2 import http.cookies
3 import json
4 import typing
5 from collections.abc import Mapping
6
7 from starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State
8 from starlette.formparsers import FormParser, MultiPartParser
9 from starlette.types import Message, Receive, Scope, Send
10
11 try:
12 from multipart.multipart import parse_options_header
13 except ImportError: # pragma: nocover
14 parse_options_header = None
15
16
17 SERVER_PUSH_HEADERS_TO_COPY = {
18 "accept",
19 "accept-encoding",
20 "accept-language",
21 "cache-control",
22 "user-agent",
23 }
24
25
26 class ClientDisconnect(Exception):
27 pass
28
29
30 class HTTPConnection(Mapping):
31 """
32 A base class for incoming HTTP connections, that is used to provide
33 any functionality that is common to both `Request` and `WebSocket`.
34 """
35
36 def __init__(self, scope: Scope, receive: Receive = None) -> None:
37 assert scope["type"] in ("http", "websocket")
38 self.scope = scope
39
40 def __getitem__(self, key: str) -> str:
41 return self.scope[key]
42
43 def __iter__(self) -> typing.Iterator[str]:
44 return iter(self.scope)
45
46 def __len__(self) -> int:
47 return len(self.scope)
48
49 @property
50 def app(self) -> typing.Any:
51 return self.scope["app"]
52
53 @property
54 def url(self) -> URL:
55 if not hasattr(self, "_url"):
56 self._url = URL(scope=self.scope)
57 return self._url
58
59 @property
60 def base_url(self) -> URL:
61 if not hasattr(self, "_base_url"):
62 base_url_scope = dict(self.scope)
63 base_url_scope["path"] = "/"
64 base_url_scope["query_string"] = b""
65 base_url_scope["root_path"] = base_url_scope.get(
66 "app_root_path", base_url_scope.get("root_path", "")
67 )
68 self._base_url = URL(scope=base_url_scope)
69 return self._base_url
70
71 @property
72 def headers(self) -> Headers:
73 if not hasattr(self, "_headers"):
74 self._headers = Headers(scope=self.scope)
75 return self._headers
76
77 @property
78 def query_params(self) -> QueryParams:
79 if not hasattr(self, "_query_params"):
80 self._query_params = QueryParams(self.scope["query_string"])
81 return self._query_params
82
83 @property
84 def path_params(self) -> dict:
85 return self.scope.get("path_params", {})
86
87 @property
88 def cookies(self) -> typing.Dict[str, str]:
89 if not hasattr(self, "_cookies"):
90 cookies = {}
91 cookie_header = self.headers.get("cookie")
92 if cookie_header:
93 cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie
94 cookie.load(cookie_header)
95 for key, morsel in cookie.items():
96 cookies[key] = morsel.value
97 self._cookies = cookies
98 return self._cookies
99
100 @property
101 def client(self) -> Address:
102 host, port = self.scope.get("client") or (None, None)
103 return Address(host=host, port=port)
104
105 @property
106 def session(self) -> dict:
107 assert (
108 "session" in self.scope
109 ), "SessionMiddleware must be installed to access request.session"
110 return self.scope["session"]
111
112 @property
113 def auth(self) -> typing.Any:
114 assert (
115 "auth" in self.scope
116 ), "AuthenticationMiddleware must be installed to access request.auth"
117 return self.scope["auth"]
118
119 @property
120 def user(self) -> typing.Any:
121 assert (
122 "user" in self.scope
123 ), "AuthenticationMiddleware must be installed to access request.user"
124 return self.scope["user"]
125
126 @property
127 def state(self) -> State:
128 if not hasattr(self, "_state"):
129 # Ensure 'state' has an empty dict if it's not already populated.
130 self.scope.setdefault("state", {})
131 # Create a state instance with a reference to the dict in which it should store info
132 self._state = State(self.scope["state"])
133 return self._state
134
135 def url_for(self, name: str, **path_params: typing.Any) -> str:
136 router = self.scope["router"]
137 url_path = router.url_path_for(name, **path_params)
138 return url_path.make_absolute_url(base_url=self.base_url)
139
140
141 async def empty_receive() -> Message:
142 raise RuntimeError("Receive channel has not been made available")
143
144
145 async def empty_send(message: Message) -> None:
146 raise RuntimeError("Send channel has not been made available")
147
148
149 class Request(HTTPConnection):
150 def __init__(
151 self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send
152 ):
153 super().__init__(scope)
154 assert scope["type"] == "http"
155 self._receive = receive
156 self._send = send
157 self._stream_consumed = False
158 self._is_disconnected = False
159
160 @property
161 def method(self) -> str:
162 return self.scope["method"]
163
164 @property
165 def receive(self) -> Receive:
166 return self._receive
167
168 async def stream(self) -> typing.AsyncGenerator[bytes, None]:
169 if hasattr(self, "_body"):
170 yield self._body
171 yield b""
172 return
173
174 if self._stream_consumed:
175 raise RuntimeError("Stream consumed")
176
177 self._stream_consumed = True
178 while True:
179 message = await self._receive()
180 if message["type"] == "http.request":
181 body = message.get("body", b"")
182 if body:
183 yield body
184 if not message.get("more_body", False):
185 break
186 elif message["type"] == "http.disconnect":
187 self._is_disconnected = True
188 raise ClientDisconnect()
189 yield b""
190
191 async def body(self) -> bytes:
192 if not hasattr(self, "_body"):
193 chunks = []
194 async for chunk in self.stream():
195 chunks.append(chunk)
196 self._body = b"".join(chunks)
197 return self._body
198
199 async def json(self) -> typing.Any:
200 if not hasattr(self, "_json"):
201 body = await self.body()
202 self._json = json.loads(body)
203 return self._json
204
205 async def form(self) -> FormData:
206 if not hasattr(self, "_form"):
207 assert (
208 parse_options_header is not None
209 ), "The `python-multipart` library must be installed to use form parsing."
210 content_type_header = self.headers.get("Content-Type")
211 content_type, options = parse_options_header(content_type_header)
212 if content_type == b"multipart/form-data":
213 multipart_parser = MultiPartParser(self.headers, self.stream())
214 self._form = await multipart_parser.parse()
215 elif content_type == b"application/x-www-form-urlencoded":
216 form_parser = FormParser(self.headers, self.stream())
217 self._form = await form_parser.parse()
218 else:
219 self._form = FormData()
220 return self._form
221
222 async def close(self) -> None:
223 if hasattr(self, "_form"):
224 await self._form.close()
225
226 async def is_disconnected(self) -> bool:
227 if not self._is_disconnected:
228 try:
229 message = await asyncio.wait_for(self._receive(), timeout=0.0000001)
230 except asyncio.TimeoutError:
231 message = {}
232
233 if message.get("type") == "http.disconnect":
234 self._is_disconnected = True
235
236 return self._is_disconnected
237
238 async def send_push_promise(self, path: str) -> None:
239 if "http.response.push" in self.scope.get("extensions", {}):
240 raw_headers = []
241 for name in SERVER_PUSH_HEADERS_TO_COPY:
242 for value in self.headers.getlist(name):
243 raw_headers.append(
244 (name.encode("latin-1"), value.encode("latin-1"))
245 )
246 await self._send(
247 {"type": "http.response.push", "path": path, "headers": raw_headers}
248 )
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/requests.py b/starlette/requests.py
--- a/starlette/requests.py
+++ b/starlette/requests.py
@@ -91,7 +91,10 @@
cookie_header = self.headers.get("cookie")
if cookie_header:
cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie
- cookie.load(cookie_header)
+ try:
+ cookie.load(cookie_header)
+ except http.cookies.CookieError:
+ pass
for key, morsel in cookie.items():
cookies[key] = morsel.value
self._cookies = cookies
| {"golden_diff": "diff --git a/starlette/requests.py b/starlette/requests.py\n--- a/starlette/requests.py\n+++ b/starlette/requests.py\n@@ -91,7 +91,10 @@\n cookie_header = self.headers.get(\"cookie\")\n if cookie_header:\n cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie\n- cookie.load(cookie_header)\n+ try:\n+ cookie.load(cookie_header)\n+ except http.cookies.CookieError:\n+ pass\n for key, morsel in cookie.items():\n cookies[key] = morsel.value\n self._cookies = cookies\n", "issue": "[bug] Invalid cookie name leads to exception\nWhen handling a request with an invalid cookie name (does not conform to RFC2109) starlette raises an exception. i.e iam/cookiename\r\nThis is because Starlette uses Python's stdlib cookie library, which is very strict.\r\nI do understand the strictness, but in real life scenarios you receive such malformed cookies and I want to handle those requests.\r\nMy suggestion for a solution would be to catch those exceptions and ignore the invalid cookie. \r\n** EDIT **\r\nI just realized stdlib is used for the whole cookie header, hence can't ignore only one cookie.\r\nI'll create a PR for ignoring the whole cookie on such case, but maybe we should create our own Cookie/Morsel class and override the methods to ignore such error in the inbound case?\n", "before_files": [{"content": "import asyncio\nimport http.cookies\nimport json\nimport typing\nfrom collections.abc import Mapping\n\nfrom starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State\nfrom starlette.formparsers import FormParser, MultiPartParser\nfrom starlette.types import Message, Receive, Scope, Send\n\ntry:\n from multipart.multipart import parse_options_header\nexcept ImportError: # pragma: nocover\n parse_options_header = None\n\n\nSERVER_PUSH_HEADERS_TO_COPY = {\n \"accept\",\n \"accept-encoding\",\n \"accept-language\",\n \"cache-control\",\n \"user-agent\",\n}\n\n\nclass ClientDisconnect(Exception):\n pass\n\n\nclass HTTPConnection(Mapping):\n \"\"\"\n A base class for incoming HTTP connections, that is used to provide\n any functionality that is common to both `Request` and `WebSocket`.\n \"\"\"\n\n def __init__(self, scope: Scope, receive: Receive = None) -> None:\n assert scope[\"type\"] in (\"http\", \"websocket\")\n self.scope = scope\n\n def __getitem__(self, key: str) -> str:\n return self.scope[key]\n\n def __iter__(self) -> typing.Iterator[str]:\n return iter(self.scope)\n\n def __len__(self) -> int:\n return len(self.scope)\n\n @property\n def app(self) -> typing.Any:\n return self.scope[\"app\"]\n\n @property\n def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n self._url = URL(scope=self.scope)\n return self._url\n\n @property\n def base_url(self) -> URL:\n if not hasattr(self, \"_base_url\"):\n base_url_scope = dict(self.scope)\n base_url_scope[\"path\"] = \"/\"\n base_url_scope[\"query_string\"] = b\"\"\n base_url_scope[\"root_path\"] = base_url_scope.get(\n \"app_root_path\", base_url_scope.get(\"root_path\", \"\")\n )\n self._base_url = URL(scope=base_url_scope)\n return self._base_url\n\n @property\n def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(scope=self.scope)\n return self._headers\n\n @property\n def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n self._query_params = QueryParams(self.scope[\"query_string\"])\n return self._query_params\n\n @property\n def path_params(self) -> dict:\n return self.scope.get(\"path_params\", {})\n\n @property\n def 
cookies(self) -> typing.Dict[str, str]:\n if not hasattr(self, \"_cookies\"):\n cookies = {}\n cookie_header = self.headers.get(\"cookie\")\n if cookie_header:\n cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie\n cookie.load(cookie_header)\n for key, morsel in cookie.items():\n cookies[key] = morsel.value\n self._cookies = cookies\n return self._cookies\n\n @property\n def client(self) -> Address:\n host, port = self.scope.get(\"client\") or (None, None)\n return Address(host=host, port=port)\n\n @property\n def session(self) -> dict:\n assert (\n \"session\" in self.scope\n ), \"SessionMiddleware must be installed to access request.session\"\n return self.scope[\"session\"]\n\n @property\n def auth(self) -> typing.Any:\n assert (\n \"auth\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.auth\"\n return self.scope[\"auth\"]\n\n @property\n def user(self) -> typing.Any:\n assert (\n \"user\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.user\"\n return self.scope[\"user\"]\n\n @property\n def state(self) -> State:\n if not hasattr(self, \"_state\"):\n # Ensure 'state' has an empty dict if it's not already populated.\n self.scope.setdefault(\"state\", {})\n # Create a state instance with a reference to the dict in which it should store info\n self._state = State(self.scope[\"state\"])\n return self._state\n\n def url_for(self, name: str, **path_params: typing.Any) -> str:\n router = self.scope[\"router\"]\n url_path = router.url_path_for(name, **path_params)\n return url_path.make_absolute_url(base_url=self.base_url)\n\n\nasync def empty_receive() -> Message:\n raise RuntimeError(\"Receive channel has not been made available\")\n\n\nasync def empty_send(message: Message) -> None:\n raise RuntimeError(\"Send channel has not been made available\")\n\n\nclass Request(HTTPConnection):\n def __init__(\n self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send\n ):\n super().__init__(scope)\n assert scope[\"type\"] == \"http\"\n self._receive = receive\n self._send = send\n self._stream_consumed = False\n self._is_disconnected = False\n\n @property\n def method(self) -> str:\n return self.scope[\"method\"]\n\n @property\n def receive(self) -> Receive:\n return self._receive\n\n async def stream(self) -> typing.AsyncGenerator[bytes, None]:\n if hasattr(self, \"_body\"):\n yield self._body\n yield b\"\"\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n elif message[\"type\"] == \"http.disconnect\":\n self._is_disconnected = True\n raise ClientDisconnect()\n yield b\"\"\n\n async def body(self) -> bytes:\n if not hasattr(self, \"_body\"):\n chunks = []\n async for chunk in self.stream():\n chunks.append(chunk)\n self._body = b\"\".join(chunks)\n return self._body\n\n async def json(self) -> typing.Any:\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n\n async def form(self) -> FormData:\n if not hasattr(self, \"_form\"):\n assert (\n parse_options_header is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n content_type_header = self.headers.get(\"Content-Type\")\n content_type, options = 
parse_options_header(content_type_header)\n if content_type == b\"multipart/form-data\":\n multipart_parser = MultiPartParser(self.headers, self.stream())\n self._form = await multipart_parser.parse()\n elif content_type == b\"application/x-www-form-urlencoded\":\n form_parser = FormParser(self.headers, self.stream())\n self._form = await form_parser.parse()\n else:\n self._form = FormData()\n return self._form\n\n async def close(self) -> None:\n if hasattr(self, \"_form\"):\n await self._form.close()\n\n async def is_disconnected(self) -> bool:\n if not self._is_disconnected:\n try:\n message = await asyncio.wait_for(self._receive(), timeout=0.0000001)\n except asyncio.TimeoutError:\n message = {}\n\n if message.get(\"type\") == \"http.disconnect\":\n self._is_disconnected = True\n\n return self._is_disconnected\n\n async def send_push_promise(self, path: str) -> None:\n if \"http.response.push\" in self.scope.get(\"extensions\", {}):\n raw_headers = []\n for name in SERVER_PUSH_HEADERS_TO_COPY:\n for value in self.headers.getlist(name):\n raw_headers.append(\n (name.encode(\"latin-1\"), value.encode(\"latin-1\"))\n )\n await self._send(\n {\"type\": \"http.response.push\", \"path\": path, \"headers\": raw_headers}\n )\n", "path": "starlette/requests.py"}], "after_files": [{"content": "import asyncio\nimport http.cookies\nimport json\nimport typing\nfrom collections.abc import Mapping\n\nfrom starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State\nfrom starlette.formparsers import FormParser, MultiPartParser\nfrom starlette.types import Message, Receive, Scope, Send\n\ntry:\n from multipart.multipart import parse_options_header\nexcept ImportError: # pragma: nocover\n parse_options_header = None\n\n\nSERVER_PUSH_HEADERS_TO_COPY = {\n \"accept\",\n \"accept-encoding\",\n \"accept-language\",\n \"cache-control\",\n \"user-agent\",\n}\n\n\nclass ClientDisconnect(Exception):\n pass\n\n\nclass HTTPConnection(Mapping):\n \"\"\"\n A base class for incoming HTTP connections, that is used to provide\n any functionality that is common to both `Request` and `WebSocket`.\n \"\"\"\n\n def __init__(self, scope: Scope, receive: Receive = None) -> None:\n assert scope[\"type\"] in (\"http\", \"websocket\")\n self.scope = scope\n\n def __getitem__(self, key: str) -> str:\n return self.scope[key]\n\n def __iter__(self) -> typing.Iterator[str]:\n return iter(self.scope)\n\n def __len__(self) -> int:\n return len(self.scope)\n\n @property\n def app(self) -> typing.Any:\n return self.scope[\"app\"]\n\n @property\n def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n self._url = URL(scope=self.scope)\n return self._url\n\n @property\n def base_url(self) -> URL:\n if not hasattr(self, \"_base_url\"):\n base_url_scope = dict(self.scope)\n base_url_scope[\"path\"] = \"/\"\n base_url_scope[\"query_string\"] = b\"\"\n base_url_scope[\"root_path\"] = base_url_scope.get(\n \"app_root_path\", base_url_scope.get(\"root_path\", \"\")\n )\n self._base_url = URL(scope=base_url_scope)\n return self._base_url\n\n @property\n def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(scope=self.scope)\n return self._headers\n\n @property\n def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n self._query_params = QueryParams(self.scope[\"query_string\"])\n return self._query_params\n\n @property\n def path_params(self) -> dict:\n return self.scope.get(\"path_params\", {})\n\n @property\n def cookies(self) -> typing.Dict[str, 
str]:\n if not hasattr(self, \"_cookies\"):\n cookies = {}\n cookie_header = self.headers.get(\"cookie\")\n if cookie_header:\n cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie\n try:\n cookie.load(cookie_header)\n except http.cookies.CookieError:\n pass\n for key, morsel in cookie.items():\n cookies[key] = morsel.value\n self._cookies = cookies\n return self._cookies\n\n @property\n def client(self) -> Address:\n host, port = self.scope.get(\"client\") or (None, None)\n return Address(host=host, port=port)\n\n @property\n def session(self) -> dict:\n assert (\n \"session\" in self.scope\n ), \"SessionMiddleware must be installed to access request.session\"\n return self.scope[\"session\"]\n\n @property\n def auth(self) -> typing.Any:\n assert (\n \"auth\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.auth\"\n return self.scope[\"auth\"]\n\n @property\n def user(self) -> typing.Any:\n assert (\n \"user\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.user\"\n return self.scope[\"user\"]\n\n @property\n def state(self) -> State:\n if not hasattr(self, \"_state\"):\n # Ensure 'state' has an empty dict if it's not already populated.\n self.scope.setdefault(\"state\", {})\n # Create a state instance with a reference to the dict in which it should store info\n self._state = State(self.scope[\"state\"])\n return self._state\n\n def url_for(self, name: str, **path_params: typing.Any) -> str:\n router = self.scope[\"router\"]\n url_path = router.url_path_for(name, **path_params)\n return url_path.make_absolute_url(base_url=self.base_url)\n\n\nasync def empty_receive() -> Message:\n raise RuntimeError(\"Receive channel has not been made available\")\n\n\nasync def empty_send(message: Message) -> None:\n raise RuntimeError(\"Send channel has not been made available\")\n\n\nclass Request(HTTPConnection):\n def __init__(\n self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send\n ):\n super().__init__(scope)\n assert scope[\"type\"] == \"http\"\n self._receive = receive\n self._send = send\n self._stream_consumed = False\n self._is_disconnected = False\n\n @property\n def method(self) -> str:\n return self.scope[\"method\"]\n\n @property\n def receive(self) -> Receive:\n return self._receive\n\n async def stream(self) -> typing.AsyncGenerator[bytes, None]:\n if hasattr(self, \"_body\"):\n yield self._body\n yield b\"\"\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n elif message[\"type\"] == \"http.disconnect\":\n self._is_disconnected = True\n raise ClientDisconnect()\n yield b\"\"\n\n async def body(self) -> bytes:\n if not hasattr(self, \"_body\"):\n chunks = []\n async for chunk in self.stream():\n chunks.append(chunk)\n self._body = b\"\".join(chunks)\n return self._body\n\n async def json(self) -> typing.Any:\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n\n async def form(self) -> FormData:\n if not hasattr(self, \"_form\"):\n assert (\n parse_options_header is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n content_type_header = self.headers.get(\"Content-Type\")\n content_type, options = 
parse_options_header(content_type_header)\n if content_type == b\"multipart/form-data\":\n multipart_parser = MultiPartParser(self.headers, self.stream())\n self._form = await multipart_parser.parse()\n elif content_type == b\"application/x-www-form-urlencoded\":\n form_parser = FormParser(self.headers, self.stream())\n self._form = await form_parser.parse()\n else:\n self._form = FormData()\n return self._form\n\n async def close(self) -> None:\n if hasattr(self, \"_form\"):\n await self._form.close()\n\n async def is_disconnected(self) -> bool:\n if not self._is_disconnected:\n try:\n message = await asyncio.wait_for(self._receive(), timeout=0.0000001)\n except asyncio.TimeoutError:\n message = {}\n\n if message.get(\"type\") == \"http.disconnect\":\n self._is_disconnected = True\n\n return self._is_disconnected\n\n async def send_push_promise(self, path: str) -> None:\n if \"http.response.push\" in self.scope.get(\"extensions\", {}):\n raw_headers = []\n for name in SERVER_PUSH_HEADERS_TO_COPY:\n for value in self.headers.getlist(name):\n raw_headers.append(\n (name.encode(\"latin-1\"), value.encode(\"latin-1\"))\n )\n await self._send(\n {\"type\": \"http.response.push\", \"path\": path, \"headers\": raw_headers}\n )\n", "path": "starlette/requests.py"}]} | 2,825 | 135 |
gh_patches_debug_24475 | rasdani/github-patches | git_diff | pyca__cryptography-5022 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement env var for OpenSSL 1.0.1 support
As part of #4923 we need to add `CRYPTOGRAPHY_ALLOW_OPENSSL_101` and update CI to test using that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/bindings/openssl/binding.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import collections
8 import threading
9 import types
10 import warnings
11
12 import cryptography
13 from cryptography import utils
14 from cryptography.exceptions import InternalError
15 from cryptography.hazmat.bindings._openssl import ffi, lib
16 from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES
17
18 _OpenSSLErrorWithText = collections.namedtuple(
19 "_OpenSSLErrorWithText", ["code", "lib", "func", "reason", "reason_text"]
20 )
21
22
23 class _OpenSSLError(object):
24 def __init__(self, code, lib, func, reason):
25 self._code = code
26 self._lib = lib
27 self._func = func
28 self._reason = reason
29
30 def _lib_reason_match(self, lib, reason):
31 return lib == self.lib and reason == self.reason
32
33 code = utils.read_only_property("_code")
34 lib = utils.read_only_property("_lib")
35 func = utils.read_only_property("_func")
36 reason = utils.read_only_property("_reason")
37
38
39 def _consume_errors(lib):
40 errors = []
41 while True:
42 code = lib.ERR_get_error()
43 if code == 0:
44 break
45
46 err_lib = lib.ERR_GET_LIB(code)
47 err_func = lib.ERR_GET_FUNC(code)
48 err_reason = lib.ERR_GET_REASON(code)
49
50 errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))
51
52 return errors
53
54
55 def _openssl_assert(lib, ok):
56 if not ok:
57 errors = _consume_errors(lib)
58 errors_with_text = []
59 for err in errors:
60 buf = ffi.new("char[]", 256)
61 lib.ERR_error_string_n(err.code, buf, len(buf))
62 err_text_reason = ffi.string(buf)
63
64 errors_with_text.append(
65 _OpenSSLErrorWithText(
66 err.code, err.lib, err.func, err.reason, err_text_reason
67 )
68 )
69
70 raise InternalError(
71 "Unknown OpenSSL error. This error is commonly encountered when "
72 "another library is not cleaning up the OpenSSL error stack. If "
73 "you are using cryptography with another library that uses "
74 "OpenSSL try disabling it before reporting a bug. Otherwise "
75 "please file an issue at https://github.com/pyca/cryptography/"
76 "issues with information on how to reproduce "
77 "this. ({0!r})".format(errors_with_text),
78 errors_with_text
79 )
80
81
82 def build_conditional_library(lib, conditional_names):
83 conditional_lib = types.ModuleType("lib")
84 conditional_lib._original_lib = lib
85 excluded_names = set()
86 for condition, names_cb in conditional_names.items():
87 if not getattr(lib, condition):
88 excluded_names.update(names_cb())
89
90 for attr in dir(lib):
91 if attr not in excluded_names:
92 setattr(conditional_lib, attr, getattr(lib, attr))
93
94 return conditional_lib
95
96
97 class Binding(object):
98 """
99 OpenSSL API wrapper.
100 """
101 lib = None
102 ffi = ffi
103 _lib_loaded = False
104 _init_lock = threading.Lock()
105 _lock_init_lock = threading.Lock()
106
107 def __init__(self):
108 self._ensure_ffi_initialized()
109
110 @classmethod
111 def _register_osrandom_engine(cls):
112 # Clear any errors extant in the queue before we start. In many
113 # scenarios other things may be interacting with OpenSSL in the same
114 # process space and it has proven untenable to assume that they will
115 # reliably clear the error queue. Once we clear it here we will
116 # error on any subsequent unexpected item in the stack.
117 cls.lib.ERR_clear_error()
118 if cls.lib.Cryptography_HAS_ENGINE:
119 result = cls.lib.Cryptography_add_osrandom_engine()
120 _openssl_assert(cls.lib, result in (1, 2))
121
122 @classmethod
123 def _ensure_ffi_initialized(cls):
124 with cls._init_lock:
125 if not cls._lib_loaded:
126 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)
127 cls._lib_loaded = True
128 # initialize the SSL library
129 cls.lib.SSL_library_init()
130 # adds all ciphers/digests for EVP
131 cls.lib.OpenSSL_add_all_algorithms()
132 # loads error strings for libcrypto and libssl functions
133 cls.lib.SSL_load_error_strings()
134 cls._register_osrandom_engine()
135
136 @classmethod
137 def init_static_locks(cls):
138 with cls._lock_init_lock:
139 cls._ensure_ffi_initialized()
140 # Use Python's implementation if available, importing _ssl triggers
141 # the setup for this.
142 __import__("_ssl")
143
144 if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or
145 cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):
146 return
147
148 # If nothing else has setup a locking callback already, we set up
149 # our own
150 res = lib.Cryptography_setup_ssl_threads()
151 _openssl_assert(cls.lib, res == 1)
152
153
154 def _verify_openssl_version(lib):
155 if (
156 lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and
157 not lib.CRYPTOGRAPHY_IS_LIBRESSL
158 ):
159 warnings.warn(
160 "OpenSSL version 1.0.1 is no longer supported by the OpenSSL "
161 "project, please upgrade. The next version of cryptography will "
162 "drop support for it.",
163 utils.CryptographyDeprecationWarning
164 )
165
166
167 def _verify_package_version(version):
168 # Occasionally we run into situations where the version of the Python
169 # package does not match the version of the shared object that is loaded.
170 # This may occur in environments where multiple versions of cryptography
171 # are installed and available in the python path. To avoid errors cropping
172 # up later this code checks that the currently imported package and the
173 # shared object that were loaded have the same version and raise an
174 # ImportError if they do not
175 so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)
176 if version.encode("ascii") != so_package_version:
177 raise ImportError(
178 "The version of cryptography does not match the loaded "
179 "shared object. This can happen if you have multiple copies of "
180 "cryptography installed in your Python path. Please try creating "
181 "a new virtual environment to resolve this issue. "
182 "Loaded python version: {}, shared object version: {}".format(
183 version, so_package_version
184 )
185 )
186
187
188 _verify_package_version(cryptography.__version__)
189
190 # OpenSSL is not thread safe until the locks are initialized. We call this
191 # method in module scope so that it executes with the import lock. On
192 # Pythons < 3.4 this import lock is a global lock, which can prevent a race
193 # condition registering the OpenSSL locks. On Python 3.4+ the import lock
194 # is per module so this approach will not work.
195 Binding.init_static_locks()
196
197 _verify_openssl_version(Binding.lib)
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py
--- a/src/cryptography/hazmat/bindings/openssl/binding.py
+++ b/src/cryptography/hazmat/bindings/openssl/binding.py
@@ -5,6 +5,7 @@
from __future__ import absolute_import, division, print_function
import collections
+import os
import threading
import types
import warnings
@@ -156,12 +157,19 @@
lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and
not lib.CRYPTOGRAPHY_IS_LIBRESSL
):
- warnings.warn(
- "OpenSSL version 1.0.1 is no longer supported by the OpenSSL "
- "project, please upgrade. The next version of cryptography will "
- "drop support for it.",
- utils.CryptographyDeprecationWarning
- )
+ if os.environ.get("CRYPTOGRAPHY_ALLOW_OPENSSL_101"):
+ warnings.warn(
+ "OpenSSL version 1.0.1 is no longer supported by the OpenSSL "
+ "project, please upgrade. The next version of cryptography "
+ "will completely remove support for it.",
+ utils.CryptographyDeprecationWarning
+ )
+ else:
+ raise RuntimeError(
+ "You are linking against OpenSSL 1.0.1, which is no longer "
+ "supported by the OpenSSL project. You need to upgrade to a "
+ "newer version of OpenSSL."
+ )
def _verify_package_version(version):
| {"golden_diff": "diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py\n--- a/src/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/src/cryptography/hazmat/bindings/openssl/binding.py\n@@ -5,6 +5,7 @@\n from __future__ import absolute_import, division, print_function\n \n import collections\n+import os\n import threading\n import types\n import warnings\n@@ -156,12 +157,19 @@\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and\n not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n- warnings.warn(\n- \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n- \"project, please upgrade. The next version of cryptography will \"\n- \"drop support for it.\",\n- utils.CryptographyDeprecationWarning\n- )\n+ if os.environ.get(\"CRYPTOGRAPHY_ALLOW_OPENSSL_101\"):\n+ warnings.warn(\n+ \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n+ \"project, please upgrade. The next version of cryptography \"\n+ \"will completely remove support for it.\",\n+ utils.CryptographyDeprecationWarning\n+ )\n+ else:\n+ raise RuntimeError(\n+ \"You are linking against OpenSSL 1.0.1, which is no longer \"\n+ \"supported by the OpenSSL project. You need to upgrade to a \"\n+ \"newer version of OpenSSL.\"\n+ )\n \n \n def _verify_package_version(version):\n", "issue": "Implement env var for OpenSSL 1.0.1 support\nAs part of #4923 we need to add `CRYPTOGRAPHY_ALLOW_OPENSSL_101` and update CI to test using that.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport threading\nimport types\nimport warnings\n\nimport cryptography\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\nclass _OpenSSLError(object):\n def __init__(self, code, lib, func, reason):\n self._code = code\n self._lib = lib\n self._func = func\n self._reason = reason\n\n def _lib_reason_match(self, lib, reason):\n return lib == self.lib and reason == self.reason\n\n code = utils.read_only_property(\"_code\")\n lib = utils.read_only_property(\"_lib\")\n func = utils.read_only_property(\"_func\")\n reason = utils.read_only_property(\"_reason\")\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _openssl_assert(lib, ok):\n if not ok:\n errors = _consume_errors(lib)\n errors_with_text = []\n for err in errors:\n buf = ffi.new(\"char[]\", 256)\n lib.ERR_error_string_n(err.code, buf, len(buf))\n err_text_reason = ffi.string(buf)\n\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n raise InternalError(\n \"Unknown OpenSSL error. This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. 
If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. ({0!r})\".format(errors_with_text),\n errors_with_text\n )\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n conditional_lib._original_lib = lib\n excluded_names = set()\n for condition, names_cb in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names.update(names_cb())\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n lib = None\n ffi = ffi\n _lib_loaded = False\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n # Clear any errors extant in the queue before we start. In many\n # scenarios other things may be interacting with OpenSSL in the same\n # process space and it has proven untenable to assume that they will\n # reliably clear the error queue. Once we clear it here we will\n # error on any subsequent unexpected item in the stack.\n cls.lib.ERR_clear_error()\n if cls.lib.Cryptography_HAS_ENGINE:\n result = cls.lib.Cryptography_add_osrandom_engine()\n _openssl_assert(cls.lib, result in (1, 2))\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or\n cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n res = lib.Cryptography_setup_ssl_threads()\n _openssl_assert(cls.lib, res == 1)\n\n\ndef _verify_openssl_version(lib):\n if (\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and\n not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n warnings.warn(\n \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n \"project, please upgrade. The next version of cryptography will \"\n \"drop support for it.\",\n utils.CryptographyDeprecationWarning\n )\n\n\ndef _verify_package_version(version):\n # Occasionally we run into situations where the version of the Python\n # package does not match the version of the shared object that is loaded.\n # This may occur in environments where multiple versions of cryptography\n # are installed and available in the python path. 
To avoid errors cropping\n # up later this code checks that the currently imported package and the\n # shared object that were loaded have the same version and raise an\n # ImportError if they do not\n so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)\n if version.encode(\"ascii\") != so_package_version:\n raise ImportError(\n \"The version of cryptography does not match the loaded \"\n \"shared object. This can happen if you have multiple copies of \"\n \"cryptography installed in your Python path. Please try creating \"\n \"a new virtual environment to resolve this issue. \"\n \"Loaded python version: {}, shared object version: {}\".format(\n version, so_package_version\n )\n )\n\n\n_verify_package_version(cryptography.__version__)\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n\n_verify_openssl_version(Binding.lib)\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport os\nimport threading\nimport types\nimport warnings\n\nimport cryptography\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\nclass _OpenSSLError(object):\n def __init__(self, code, lib, func, reason):\n self._code = code\n self._lib = lib\n self._func = func\n self._reason = reason\n\n def _lib_reason_match(self, lib, reason):\n return lib == self.lib and reason == self.reason\n\n code = utils.read_only_property(\"_code\")\n lib = utils.read_only_property(\"_lib\")\n func = utils.read_only_property(\"_func\")\n reason = utils.read_only_property(\"_reason\")\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _openssl_assert(lib, ok):\n if not ok:\n errors = _consume_errors(lib)\n errors_with_text = []\n for err in errors:\n buf = ffi.new(\"char[]\", 256)\n lib.ERR_error_string_n(err.code, buf, len(buf))\n err_text_reason = ffi.string(buf)\n\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n raise InternalError(\n \"Unknown OpenSSL error. This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. 
({0!r})\".format(errors_with_text),\n errors_with_text\n )\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n conditional_lib._original_lib = lib\n excluded_names = set()\n for condition, names_cb in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names.update(names_cb())\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n lib = None\n ffi = ffi\n _lib_loaded = False\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n # Clear any errors extant in the queue before we start. In many\n # scenarios other things may be interacting with OpenSSL in the same\n # process space and it has proven untenable to assume that they will\n # reliably clear the error queue. Once we clear it here we will\n # error on any subsequent unexpected item in the stack.\n cls.lib.ERR_clear_error()\n if cls.lib.Cryptography_HAS_ENGINE:\n result = cls.lib.Cryptography_add_osrandom_engine()\n _openssl_assert(cls.lib, result in (1, 2))\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or\n cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n res = lib.Cryptography_setup_ssl_threads()\n _openssl_assert(cls.lib, res == 1)\n\n\ndef _verify_openssl_version(lib):\n if (\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and\n not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n if os.environ.get(\"CRYPTOGRAPHY_ALLOW_OPENSSL_101\"):\n warnings.warn(\n \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n \"project, please upgrade. The next version of cryptography \"\n \"will completely remove support for it.\",\n utils.CryptographyDeprecationWarning\n )\n else:\n raise RuntimeError(\n \"You are linking against OpenSSL 1.0.1, which is no longer \"\n \"supported by the OpenSSL project. You need to upgrade to a \"\n \"newer version of OpenSSL.\"\n )\n\n\ndef _verify_package_version(version):\n # Occasionally we run into situations where the version of the Python\n # package does not match the version of the shared object that is loaded.\n # This may occur in environments where multiple versions of cryptography\n # are installed and available in the python path. 
To avoid errors cropping\n # up later this code checks that the currently imported package and the\n # shared object that were loaded have the same version and raise an\n # ImportError if they do not\n so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)\n if version.encode(\"ascii\") != so_package_version:\n raise ImportError(\n \"The version of cryptography does not match the loaded \"\n \"shared object. This can happen if you have multiple copies of \"\n \"cryptography installed in your Python path. Please try creating \"\n \"a new virtual environment to resolve this issue. \"\n \"Loaded python version: {}, shared object version: {}\".format(\n version, so_package_version\n )\n )\n\n\n_verify_package_version(cryptography.__version__)\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n\n_verify_openssl_version(Binding.lib)\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}]} | 2,345 | 353 |
gh_patches_debug_15037 | rasdani/github-patches | git_diff | azavea__raster-vision-1464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch from Gitter to another tool for usage questions
## 🚀 Feature
Currently we use [Gitter](https://gitter.im/azavea/raster-vision) for usage questions, but it's not well designed for this use case. I would like to switch to [Github Discussions](https://github.com/azavea/raster-vision/discussions/landing), [Stack Overflow](https://stackoverflow.com/), or [Discourse](https://www.discourse.org/).
## Motivation
Gitter is designed more as a chat system, and we need something where people can make posts that can be open/closed, and later searchable to avoid repeat questions.
## Pitch
We should spend a small amount of time comparing the different options, make a decision, and then implement the change. Github Discussions seems like the most straightforward option.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # flake8: noqa
2
3 from pallets_sphinx_themes import ProjectLink, get_version
4
5 # -*- coding: utf-8 -*-
6 #
7 # Configuration file for the Sphinx documentation builder.
8 #
9 # This file does only contain a selection of the most common options. For a
10 # full list see the documentation:
11 # http://www.sphinx-doc.org/en/stable/config
12
13 # -- Path setup --------------------------------------------------------------
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19 # import os
20 # import sys
21 # sys.path.insert(0, os.path.abspath('.'))
22
23 # -- Project information -----------------------------------------------------
24
25 project = 'Raster Vision'
26 copyright = '2018, Azavea'
27 author = 'Azavea'
28
29 # The short X.Y version
30 version = '0.13'
31 # The full version, including alpha/beta/rc tags
32 release = '0.13.1'
33
34 # -- General configuration ---------------------------------------------------
35
36 # If your documentation needs a minimal Sphinx version, state it here.
37 #
38 # needs_sphinx = '1.0'
39
40 # Add any Sphinx extension module names here, as strings. They can be
41 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
42 # ones.
43 extensions = [
44 'sphinx.ext.autodoc',
45 'sphinx.ext.intersphinx',
46 'pallets_sphinx_themes',
47 'sphinx.ext.napoleon',
48 ]
49
50 # https://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
51 import sys
52 from unittest.mock import MagicMock
53
54
55 class Mock(MagicMock):
56 @classmethod
57 def __getattr__(cls, name):
58 return MagicMock()
59
60
61 MOCK_MODULES = ['pyproj', 'h5py', 'osgeo', 'mask_to_polygons']
62 sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
63
64 autodoc_mock_imports = ['torch', 'torchvision', 'pycocotools']
65
66 intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}
67
68 # Add any paths that contain templates here, relative to this directory.
69 templates_path = ['_templates']
70
71 # The suffix(es) of source filenames.
72 # You can specify multiple suffix as a list of string:
73 #
74 # source_suffix = ['.rst', '.md']
75 source_suffix = '.rst'
76
77 # The master toctree document.
78 master_doc = 'index'
79
80 # The language for content autogenerated by Sphinx. Refer to documentation
81 # for a list of supported languages.
82 #
83 # This is also used if you do content translation via gettext catalogs.
84 # Usually you set "language" from the command line for these cases.
85 language = None
86
87 # List of patterns, relative to source directory, that match files and
88 # directories to ignore when looking for source files.
89 # This pattern also affects html_static_path and html_extra_path .
90 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']
91
92 # The name of the Pygments (syntax highlighting) style to use.
93 # pygments_style = 'sphinx'
94
95 # HTML -----------------------------------------------------------------
96
97 html_theme = 'click'
98 html_theme_options = {'index_sidebar_logo': False}
99 html_context = {
100 'project_links': [
101 ProjectLink('Quickstart', 'quickstart.html'),
102 ProjectLink('Documentation TOC', 'index.html#documentation'),
103 ProjectLink('Examples', 'examples.html'),
104 ProjectLink('Config API Reference', 'index.html#api'),
105 ProjectLink('AWS Batch Setup', 'cloudformation.html'),
106 ProjectLink('Project Website', 'https://rastervision.io/'),
107 ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),
108 ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),
109 ProjectLink('Gitter Channel',
110 'https://gitter.im/azavea/raster-vision'),
111 ProjectLink('Issue Tracker',
112 'https://github.com/azavea/raster-vision/issues/'),
113 ProjectLink('CHANGELOG', 'changelog.html'),
114 ProjectLink('Azavea', 'https://www.azavea.com/'),
115 ],
116 'css_files': [
117 '_static/rastervision.css',
118 'https://media.readthedocs.org/css/badge_only.css'
119 ]
120 }
121 html_sidebars = {
122 'index': ['project.html', 'versions.html', 'searchbox.html'],
123 '**': [
124 'project.html', 'localtoc.html', 'relations.html', 'versions.html',
125 'searchbox.html'
126 ],
127 }
128 singlehtml_sidebars = {
129 'index': ['project.html', 'versions.html', 'localtoc.html']
130 }
131 html_static_path = ['_static']
132 html_favicon = 'img/raster-vision-icon.png'
133 html_logo = 'img/raster-vision-logo.png'
134 html_title = 'Raster Vision Documentation ({})'.format(version)
135 html_show_sourcelink = False
136 html_domain_indices = False
137 html_experimental_html5_writer = True
138
139 # -- Options for HTMLHelp output ---------------------------------------------
140
141 # Output file base name for HTML help builder.
142 htmlhelp_basename = 'RasterVisiondoc'
143
144 # -- Options for LaTeX output ------------------------------------------------
145
146 latex_elements = {
147 # The paper size ('letterpaper' or 'a4paper').
148 #
149 # 'papersize': 'letterpaper',
150
151 # The font size ('10pt', '11pt' or '12pt').
152 #
153 # 'pointsize': '10pt',
154
155 # Additional stuff for the LaTeX preamble.
156 #
157 # 'preamble': '',
158
159 # Latex figure (float) alignment
160 #
161 # 'figure_align': 'htbp',
162 }
163
164 # Grouping the document tree into LaTeX files. List of tuples
165 # (source start file, target name, title,
166 # author, documentclass [howto, manual, or own class]).
167 latex_documents = [
168 (master_doc, 'RasterVision.tex', 'Raster Vision Documentation', 'Azavea',
169 'manual'),
170 ]
171
172 # -- Options for manual page output ------------------------------------------
173
174 # One entry per manual page. List of tuples
175 # (source start file, name, description, authors, manual section).
176 man_pages = [(master_doc, 'RasterVisoin-{}.tex', html_title, [author],
177 'manual')]
178
179 # -- Options for Texinfo output ----------------------------------------------
180
181 # Grouping the document tree into Texinfo files. List of tuples
182 # (source start file, target name, title, author,
183 # dir menu entry, description, category)
184 texinfo_documents = [
185 (master_doc, 'RasterVision', 'Raster Vision Documentation', author,
186 'RasterVision', 'One line description of project.', 'Miscellaneous'),
187 ]
188
189 # -- Extension configuration -------------------------------------------------
190
191 programoutput_prompt_template = '> {command}\n{output}'
192
193 # -- Options for todo extension ----------------------------------------------
194
195 # If true, `todo` and `todoList` produce output, else they produce nothing.
196 todo_include_todos = True
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -106,8 +106,8 @@
ProjectLink('Project Website', 'https://rastervision.io/'),
ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),
ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),
- ProjectLink('Gitter Channel',
- 'https://gitter.im/azavea/raster-vision'),
+ ProjectLink('Discussion Forum',
+ 'https://github.com/azavea/raster-vision/discussions'),
ProjectLink('Issue Tracker',
'https://github.com/azavea/raster-vision/issues/'),
ProjectLink('CHANGELOG', 'changelog.html'),
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -106,8 +106,8 @@\n ProjectLink('Project Website', 'https://rastervision.io/'),\n ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),\n ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),\n- ProjectLink('Gitter Channel',\n- 'https://gitter.im/azavea/raster-vision'),\n+ ProjectLink('Discussion Forum',\n+ 'https://github.com/azavea/raster-vision/discussions'),\n ProjectLink('Issue Tracker',\n 'https://github.com/azavea/raster-vision/issues/'),\n ProjectLink('CHANGELOG', 'changelog.html'),\n", "issue": "Switch from Gitter to another tool for usage questions\n## \ud83d\ude80 Feature\r\n\r\nCurrently we use [Gitter](https://gitter.im/azavea/raster-vision) for usage questions, but it's not well designed for this use case. I would like to switch to [Github Discussions](https://github.com/azavea/raster-vision/discussions/landing), [Stack Overflow](https://stackoverflow.com/), or [Discourse](https://www.discourse.org/). \r\n\r\n## Motivation\r\n\r\nGitter is designed more as a chat system, and we need something where people can make posts that can be open/closed, and later searchable to avoid repeat questions.\r\n\r\n## Pitch\r\n\r\nWe should spend a small amount of time comparing the different options, make a decision, and then implement the change. Github Discussions seems like the most straightforward option.\r\n\r\n\n", "before_files": [{"content": "# flake8: noqa\n\nfrom pallets_sphinx_themes import ProjectLink, get_version\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Raster Vision'\ncopyright = '2018, Azavea'\nauthor = 'Azavea'\n\n# The short X.Y version\nversion = '0.13'\n# The full version, including alpha/beta/rc tags\nrelease = '0.13.1'\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'pallets_sphinx_themes',\n 'sphinx.ext.napoleon',\n]\n\n# https://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules\nimport sys\nfrom unittest.mock import MagicMock\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return MagicMock()\n\n\nMOCK_MODULES = ['pyproj', 'h5py', 'osgeo', 'mask_to_polygons']\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\nautodoc_mock_imports = ['torch', 'torchvision', 'pycocotools']\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'sphinx'\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = 'click'\nhtml_theme_options = {'index_sidebar_logo': False}\nhtml_context = {\n 'project_links': [\n ProjectLink('Quickstart', 'quickstart.html'),\n ProjectLink('Documentation TOC', 'index.html#documentation'),\n ProjectLink('Examples', 'examples.html'),\n ProjectLink('Config API Reference', 'index.html#api'),\n ProjectLink('AWS Batch Setup', 'cloudformation.html'),\n ProjectLink('Project Website', 'https://rastervision.io/'),\n ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),\n ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),\n ProjectLink('Gitter Channel',\n 'https://gitter.im/azavea/raster-vision'),\n ProjectLink('Issue Tracker',\n 'https://github.com/azavea/raster-vision/issues/'),\n ProjectLink('CHANGELOG', 'changelog.html'),\n ProjectLink('Azavea', 'https://www.azavea.com/'),\n ],\n 'css_files': [\n '_static/rastervision.css',\n 'https://media.readthedocs.org/css/badge_only.css'\n ]\n}\nhtml_sidebars = {\n 'index': ['project.html', 'versions.html', 'searchbox.html'],\n '**': [\n 'project.html', 'localtoc.html', 'relations.html', 'versions.html',\n 'searchbox.html'\n ],\n}\nsinglehtml_sidebars = {\n 'index': ['project.html', 'versions.html', 'localtoc.html']\n}\nhtml_static_path = ['_static']\nhtml_favicon = 'img/raster-vision-icon.png'\nhtml_logo = 'img/raster-vision-logo.png'\nhtml_title = 'Raster Vision Documentation ({})'.format(version)\nhtml_show_sourcelink = False\nhtml_domain_indices = False\nhtml_experimental_html5_writer = True\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'RasterVisiondoc'\n\n# -- Options for LaTeX output 
------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'RasterVision.tex', 'Raster Vision Documentation', 'Azavea',\n 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'RasterVisoin-{}.tex', html_title, [author],\n 'manual')]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'RasterVision', 'Raster Vision Documentation', author,\n 'RasterVision', 'One line description of project.', 'Miscellaneous'),\n]\n\n# -- Extension configuration -------------------------------------------------\n\nprogramoutput_prompt_template = '> {command}\\n{output}'\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}], "after_files": [{"content": "# flake8: noqa\n\nfrom pallets_sphinx_themes import ProjectLink, get_version\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Raster Vision'\ncopyright = '2018, Azavea'\nauthor = 'Azavea'\n\n# The short X.Y version\nversion = '0.13'\n# The full version, including alpha/beta/rc tags\nrelease = '0.13.1'\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'pallets_sphinx_themes',\n 'sphinx.ext.napoleon',\n]\n\n# https://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules\nimport sys\nfrom unittest.mock import MagicMock\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return MagicMock()\n\n\nMOCK_MODULES = ['pyproj', 'h5py', 'osgeo', 'mask_to_polygons']\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\nautodoc_mock_imports = ['torch', 'torchvision', 'pycocotools']\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'sphinx'\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = 'click'\nhtml_theme_options = {'index_sidebar_logo': False}\nhtml_context = {\n 'project_links': [\n ProjectLink('Quickstart', 'quickstart.html'),\n ProjectLink('Documentation TOC', 'index.html#documentation'),\n ProjectLink('Examples', 'examples.html'),\n ProjectLink('Config API Reference', 'index.html#api'),\n ProjectLink('AWS Batch Setup', 'cloudformation.html'),\n ProjectLink('Project Website', 'https://rastervision.io/'),\n ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),\n ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),\n ProjectLink('Discussion Forum',\n 'https://github.com/azavea/raster-vision/discussions'),\n ProjectLink('Issue Tracker',\n 'https://github.com/azavea/raster-vision/issues/'),\n ProjectLink('CHANGELOG', 'changelog.html'),\n ProjectLink('Azavea', 'https://www.azavea.com/'),\n ],\n 'css_files': [\n '_static/rastervision.css',\n 'https://media.readthedocs.org/css/badge_only.css'\n ]\n}\nhtml_sidebars = {\n 'index': ['project.html', 'versions.html', 'searchbox.html'],\n '**': [\n 'project.html', 'localtoc.html', 'relations.html', 'versions.html',\n 'searchbox.html'\n ],\n}\nsinglehtml_sidebars = {\n 'index': ['project.html', 'versions.html', 'localtoc.html']\n}\nhtml_static_path = ['_static']\nhtml_favicon = 'img/raster-vision-icon.png'\nhtml_logo = 'img/raster-vision-logo.png'\nhtml_title = 'Raster Vision Documentation ({})'.format(version)\nhtml_show_sourcelink = False\nhtml_domain_indices = False\nhtml_experimental_html5_writer = True\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'RasterVisiondoc'\n\n# -- Options for LaTeX output 
------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'RasterVision.tex', 'Raster Vision Documentation', 'Azavea',\n 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'RasterVisoin-{}.tex', html_title, [author],\n 'manual')]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'RasterVision', 'Raster Vision Documentation', author,\n 'RasterVision', 'One line description of project.', 'Miscellaneous'),\n]\n\n# -- Extension configuration -------------------------------------------------\n\nprogramoutput_prompt_template = '> {command}\\n{output}'\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}]} | 2,465 | 185 |
gh_patches_debug_17267 | rasdani/github-patches | git_diff | pulp__pulpcore-239 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix bug where Last-Modified header was being updated on duplicate package uploads
Fixes a bug where the Last-Modified header of a package stored in django-storages was being updated on duplicate uploads.
Closes #5149
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages, setup
2
3 with open('README.md') as f:
4 long_description = f.read()
5
6 requirements = [
7 'coreapi',
8 'Django~=2.2', # LTS version, switch only if we have a compelling reason to
9 'django-filter',
10 'djangorestframework',
11 'djangorestframework-queryfields',
12 'drf-nested-routers',
13 'drf-yasg',
14 'gunicorn',
15 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
16 'PyYAML',
17 'rq~=1.0',
18 'redis<3.2.0',
19 'setuptools',
20 'dynaconf~=2.0',
21 'whitenoise',
22 ]
23
24 setup(
25 name='pulpcore',
26 version='3.0.0rc5.dev',
27 description='Pulp Django Application and Related Modules',
28 long_description=long_description,
29 long_description_content_type="text/markdown",
30 license='GPLv2+',
31 packages=find_packages(exclude=['test']),
32 author='Pulp Team',
33 author_email='[email protected]',
34 url='http://www.pulpproject.org',
35 python_requires='>=3.6',
36 install_requires=requirements,
37 extras_require={
38 'postgres': ['psycopg2-binary'],
39 'mysql': ['mysqlclient']
40 },
41 include_package_data=True,
42 classifiers=(
43 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
44 'Operating System :: POSIX :: Linux',
45 'Development Status :: 4 - Beta',
46 'Framework :: Django',
47 'Programming Language :: Python',
48 'Programming Language :: Python :: 3',
49 'Programming Language :: Python :: 3.6',
50 'Programming Language :: Python :: 3.7',
51 ),
52 scripts=['bin/pulp-content'],
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,21 +4,21 @@
long_description = f.read()
requirements = [
- 'coreapi',
- 'Django~=2.2', # LTS version, switch only if we have a compelling reason to
- 'django-filter',
- 'djangorestframework',
- 'djangorestframework-queryfields',
- 'drf-nested-routers',
- 'drf-yasg',
- 'gunicorn',
+ 'coreapi~=2.3.3',
+ 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to
+ 'django-filter~=2.2.0',
+ 'djangorestframework~=3.10.2',
+ 'djangorestframework-queryfields~=1.0.0',
+ 'drf-nested-routers~=0.91.0',
+ 'drf-yasg~=1.16.1',
+ 'gunicorn~=19.9.0',
'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
- 'PyYAML',
- 'rq~=1.0',
- 'redis<3.2.0',
- 'setuptools',
- 'dynaconf~=2.0',
- 'whitenoise',
+ 'PyYAML~=5.1.1',
+ 'rq~=1.1.0',
+ 'redis~=3.1.0',
+ 'setuptools~=41.0.1',
+ 'dynaconf~=2.0.3',
+ 'whitenoise~=4.1.3',
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,21 +4,21 @@\n long_description = f.read()\n \n requirements = [\n- 'coreapi',\n- 'Django~=2.2', # LTS version, switch only if we have a compelling reason to\n- 'django-filter',\n- 'djangorestframework',\n- 'djangorestframework-queryfields',\n- 'drf-nested-routers',\n- 'drf-yasg',\n- 'gunicorn',\n+ 'coreapi~=2.3.3',\n+ 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to\n+ 'django-filter~=2.2.0',\n+ 'djangorestframework~=3.10.2',\n+ 'djangorestframework-queryfields~=1.0.0',\n+ 'drf-nested-routers~=0.91.0',\n+ 'drf-yasg~=1.16.1',\n+ 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n- 'PyYAML',\n- 'rq~=1.0',\n- 'redis<3.2.0',\n- 'setuptools',\n- 'dynaconf~=2.0',\n- 'whitenoise',\n+ 'PyYAML~=5.1.1',\n+ 'rq~=1.1.0',\n+ 'redis~=3.1.0',\n+ 'setuptools~=41.0.1',\n+ 'dynaconf~=2.0.3',\n+ 'whitenoise~=4.1.3',\n ]\n \n setup(\n", "issue": "Fix bug where Last-Modified header was being updated on duplicate package uploads\nFixes a bug where the Last-Modified header of a package stored in django-storages was being updated on duplicate uploads.\r\n\r\nCloses #5149\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nrequirements = [\n 'coreapi',\n 'Django~=2.2', # LTS version, switch only if we have a compelling reason to\n 'django-filter',\n 'djangorestframework',\n 'djangorestframework-queryfields',\n 'drf-nested-routers',\n 'drf-yasg',\n 'gunicorn',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n 'PyYAML',\n 'rq~=1.0',\n 'redis<3.2.0',\n 'setuptools',\n 'dynaconf~=2.0',\n 'whitenoise',\n]\n\nsetup(\n name='pulpcore',\n version='3.0.0rc5.dev',\n description='Pulp Django Application and Related Modules',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='GPLv2+',\n packages=find_packages(exclude=['test']),\n author='Pulp Team',\n author_email='[email protected]',\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n extras_require={\n 'postgres': ['psycopg2-binary'],\n 'mysql': ['mysqlclient']\n },\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Operating System :: POSIX :: Linux',\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ),\n scripts=['bin/pulp-content'],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nrequirements = [\n 'coreapi~=2.3.3',\n 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to\n 'django-filter~=2.2.0',\n 'djangorestframework~=3.10.2',\n 'djangorestframework-queryfields~=1.0.0',\n 'drf-nested-routers~=0.91.0',\n 'drf-yasg~=1.16.1',\n 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n 'PyYAML~=5.1.1',\n 'rq~=1.1.0',\n 'redis~=3.1.0',\n 'setuptools~=41.0.1',\n 'dynaconf~=2.0.3',\n 'whitenoise~=4.1.3',\n]\n\nsetup(\n name='pulpcore',\n version='3.0.0rc5.dev',\n description='Pulp Django Application and Related Modules',\n 
long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='GPLv2+',\n packages=find_packages(exclude=['test']),\n author='Pulp Team',\n author_email='[email protected]',\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n extras_require={\n 'postgres': ['psycopg2-binary'],\n 'mysql': ['mysqlclient']\n },\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Operating System :: POSIX :: Linux',\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ),\n scripts=['bin/pulp-content'],\n)\n", "path": "setup.py"}]} | 840 | 425 |
gh_patches_debug_8118 | rasdani/github-patches | git_diff | arviz-devs__arviz-1566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add an example of the coords argument in documentation
**Describe the bug**
There's this fancy coords argument but not a great explanation of how to use it in the code examples below
https://arviz-devs.github.io/arviz/api/generated/arviz.plot_posterior.html

**To Reproduce**
Look at the doc, notice the lack of coords documentation in the code example
**Expected behavior**
There's an example in the code below
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/posteriorplot.py`
Content:
```
1 """Plot posterior densities."""
2 from ..data import convert_to_dataset
3 from ..rcparams import rcParams
4 from ..utils import _var_names, get_coords
5 from .plot_utils import default_grid, filter_plotters_list, get_plotting_function, xarray_var_iter
6
7
8 def plot_posterior(
9 data,
10 var_names=None,
11 filter_vars=None,
12 transform=None,
13 coords=None,
14 grid=None,
15 figsize=None,
16 textsize=None,
17 hdi_prob=None,
18 multimodal=False,
19 skipna=False,
20 round_to=None,
21 point_estimate="auto",
22 group="posterior",
23 rope=None,
24 ref_val=None,
25 kind="kde",
26 bw="default",
27 circular=False,
28 bins=None,
29 ax=None,
30 backend=None,
31 backend_kwargs=None,
32 show=None,
33 **kwargs
34 ):
35 """Plot Posterior densities in the style of John K. Kruschke's book.
36
37 Parameters
38 ----------
39 data: obj
40 Any object that can be converted to an az.InferenceData object
41 Refer to documentation of az.convert_to_dataset for details
42 var_names: list of variable names
43 Variables to be plotted, two variables are required. Prefix the variables by `~`
44 when you want to exclude them from the plot.
45 filter_vars: {None, "like", "regex"}, optional, default=None
46 If `None` (default), interpret var_names as the real variables names. If "like",
47 interpret var_names as substrings of the real variables names. If "regex",
48 interpret var_names as regular expressions on the real variables names. A la
49 `pandas.filter`.
50 transform: callable
51 Function to transform data (defaults to None i.e.the identity function)
52 coords: mapping, optional
53 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
54 grid : tuple
55 Number of rows and columns. Defaults to None, the rows and columns are
56 automatically inferred.
57 figsize: tuple
58 Figure size. If None it will be defined automatically.
59 textsize: float
60 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
61 on figsize.
62 hdi_prob: float, optional
63 Plots highest density interval for chosen percentage of density.
64 Use 'hide' to hide the highest density interval. Defaults to 0.94.
65 multimodal: bool
66 If true (default) it may compute more than one credible interval if the distribution is
67 multimodal and the modes are well separated.
68 skipna : bool
69 If true ignores nan values when computing the hdi and point estimates. Defaults to false.
70 round_to: int, optional
71 Controls formatting of floats. Defaults to 2 or the integer part, whichever is bigger.
72 point_estimate: Optional[str]
73 Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.
74 Defaults to 'auto' i.e. it falls back to default set in rcParams.
75 group: str, optional
76 Specifies which InferenceData group should be plotted. Defaults to ‘posterior’.
77 rope: tuple or dictionary of tuples
78 Lower and upper values of the Region Of Practical Equivalence. If a list is provided, its
79 length should match the number of variables.
80 ref_val: float or dictionary of floats
81 display the percentage below and above the values in ref_val. Must be None (default),
82 a constant, a list or a dictionary like see an example below. If a list is provided, its
83 length should match the number of variables.
84 kind: str
85 Type of plot to display (kde or hist) For discrete variables this argument is ignored and
86 a histogram is always used.
87 bw: float or str, optional
88 If numeric, indicates the bandwidth and must be positive.
89 If str, indicates the method to estimate the bandwidth and must be
90 one of "scott", "silverman", "isj" or "experimental" when `circular` is False
91 and "taylor" (for now) when `circular` is True.
92 Defaults to "default" which means "experimental" when variable is not circular
93 and "taylor" when it is. Only works if `kind == kde`.
94 circular: bool, optional
95 If True, it interprets the values passed are from a circular variable measured in radians
96 and a circular KDE is used. Only valid for 1D KDE. Defaults to False.
97 Only works if `kind == kde`.
98 bins: integer or sequence or 'auto', optional
99 Controls the number of bins, accepts the same keywords `matplotlib.hist()` does. Only works
100 if `kind == hist`. If None (default) it will use `auto` for continuous variables and
101 `range(xmin, xmax + 1)` for discrete variables.
102 ax: numpy array-like of matplotlib axes or bokeh figures, optional
103 A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
104 its own array of plot areas (and return it).
105 backend: str, optional
106 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
107 backend_kwargs: bool, optional
108 These are kwargs specific to the backend being used. For additional documentation
109 check the plotting method of the backend.
110 show: bool, optional
111 Call backend show function.
112 **kwargs
113 Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.
114
115 Returns
116 -------
117 axes: matplotlib axes or bokeh figures
118
119 Examples
120 --------
121 Show a default kernel density plot following style of John Kruschke
122
123 .. plot::
124 :context: close-figs
125
126 >>> import arviz as az
127 >>> data = az.load_arviz_data('centered_eight')
128 >>> az.plot_posterior(data)
129
130 Plot subset variables by specifying variable name exactly
131
132 .. plot::
133 :context: close-figs
134
135 >>> az.plot_posterior(data, var_names=['mu'])
136
137 Plot Region of Practical Equivalence (rope) and select variables with regular expressions
138
139 .. plot::
140 :context: close-figs
141
142 >>> az.plot_posterior(data, var_names=['mu', '^the'], filter_vars="regex", rope=(-1, 1))
143
144 Plot Region of Practical Equivalence for selected distributions
145
146 .. plot::
147 :context: close-figs
148
149 >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}
150 >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)
151
152
153 Add reference lines
154
155 .. plot::
156 :context: close-figs
157
158 >>> az.plot_posterior(data, var_names=['mu', 'theta'], ref_val=0)
159
160 Show point estimate of distribution
161
162 .. plot::
163 :context: close-figs
164
165 >>> az.plot_posterior(data, var_names=['mu', 'theta'], point_estimate='mode')
166
167 Show reference values using variable names and coordinates
168
169 .. plot::
170 :context: close-figs
171
172 >>> az.plot_posterior(data, ref_val= {"theta": [{"school": "Deerfield", "ref_val": 4},
173 ... {"school": "Choate", "ref_val": 3}]})
174
175 Show reference values using a list
176
177 .. plot::
178 :context: close-figs
179
180 >>> az.plot_posterior(data, ref_val=[1] + [5] * 8 + [1])
181
182
183 Plot posterior as a histogram
184
185 .. plot::
186 :context: close-figs
187
188 >>> az.plot_posterior(data, var_names=['mu'], kind='hist')
189
190 Change size of highest density interval
191
192 .. plot::
193 :context: close-figs
194
195 >>> az.plot_posterior(data, var_names=['mu'], hdi_prob=.75)
196 """
197 data = convert_to_dataset(data, group=group)
198 if transform is not None:
199 data = transform(data)
200 var_names = _var_names(var_names, data, filter_vars)
201
202 if coords is None:
203 coords = {}
204
205 if hdi_prob is None:
206 hdi_prob = rcParams["stats.hdi_prob"]
207 elif hdi_prob not in (None, "hide"):
208 if not 1 >= hdi_prob > 0:
209 raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
210
211 if point_estimate == "auto":
212 point_estimate = rcParams["plot.point_estimate"]
213 elif point_estimate not in {"mean", "median", "mode", None}:
214 raise ValueError("The value of point_estimate must be either mean, median, mode or None.")
215
216 plotters = filter_plotters_list(
217 list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),
218 "plot_posterior",
219 )
220 length_plotters = len(plotters)
221 rows, cols = default_grid(length_plotters, grid=grid)
222
223 posteriorplot_kwargs = dict(
224 ax=ax,
225 length_plotters=length_plotters,
226 rows=rows,
227 cols=cols,
228 figsize=figsize,
229 plotters=plotters,
230 bw=bw,
231 circular=circular,
232 bins=bins,
233 kind=kind,
234 point_estimate=point_estimate,
235 round_to=round_to,
236 hdi_prob=hdi_prob,
237 multimodal=multimodal,
238 skipna=skipna,
239 textsize=textsize,
240 ref_val=ref_val,
241 rope=rope,
242 kwargs=kwargs,
243 backend_kwargs=backend_kwargs,
244 show=show,
245 )
246
247 if backend is None:
248 backend = rcParams["plot.backend"]
249 backend = backend.lower()
250
251 # TODO: Add backend kwargs
252 plot = get_plotting_function("plot_posterior", "posteriorplot", backend)
253 ax = plot(**posteriorplot_kwargs)
254 return ax
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/plots/posteriorplot.py b/arviz/plots/posteriorplot.py
--- a/arviz/plots/posteriorplot.py
+++ b/arviz/plots/posteriorplot.py
@@ -149,6 +149,13 @@
>>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}
>>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)
+ Using `coords` argument to plot only a subset of data
+
+ .. plot::
+ :context: close-figs
+
+ >>> coords = {"school": ["Choate","Phillips Exeter"]}
+ >>> az.plot_posterior(data, var_names=["mu", "theta"], coords=coords)
Add reference lines
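
In practice, the `coords` usage this patch documents comes down to the short sketch below. It assumes the bundled `centered_eight` example dataset that the rest of the docstring already uses; the variable and coordinate names are taken from the diff above.

```python
# Minimal sketch of the documented `coords` usage (assumes ArviZ's bundled
# "centered_eight" example dataset, as in the other docstring examples).
import arviz as az

data = az.load_arviz_data("centered_eight")

# Keep only two values of the "school" coordinate; variables that lack this
# dimension (such as "mu") are plotted unchanged.
coords = {"school": ["Choate", "Phillips Exeter"]}
az.plot_posterior(data, var_names=["mu", "theta"], coords=coords)
```

The selection is applied through `get_coords` (i.e. `Dataset.sel`) before the plotters are built, which is why it composes with `var_names` and the other keyword arguments.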
| {"golden_diff": "diff --git a/arviz/plots/posteriorplot.py b/arviz/plots/posteriorplot.py\n--- a/arviz/plots/posteriorplot.py\n+++ b/arviz/plots/posteriorplot.py\n@@ -149,6 +149,13 @@\n >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)\n \n+ Using `coords` argument to plot only a subset of data\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> coords = {\"school\": [\"Choate\",\"Phillips Exeter\"]}\n+ >>> az.plot_posterior(data, var_names=[\"mu\", \"theta\"], coords=coords)\n \n Add reference lines\n", "issue": "Add an examples of coords argument in documentation\n**Describe the bug**\r\nThere's this fancy coords argument but not a great explanation on how to use it in the code examples below\r\nhttps://arviz-devs.github.io/arviz/api/generated/arviz.plot_posterior.html\r\n\r\n\r\n\r\n\r\n**To Reproduce**\r\nLook at the doc, notice the lack of coords documentation in the code example\r\n\r\n**Expected behavior**\r\nTheres an example in the code below\r\n\n", "before_files": [{"content": "\"\"\"Plot posterior densities.\"\"\"\nfrom ..data import convert_to_dataset\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\nfrom .plot_utils import default_grid, filter_plotters_list, get_plotting_function, xarray_var_iter\n\n\ndef plot_posterior(\n data,\n var_names=None,\n filter_vars=None,\n transform=None,\n coords=None,\n grid=None,\n figsize=None,\n textsize=None,\n hdi_prob=None,\n multimodal=False,\n skipna=False,\n round_to=None,\n point_estimate=\"auto\",\n group=\"posterior\",\n rope=None,\n ref_val=None,\n kind=\"kde\",\n bw=\"default\",\n circular=False,\n bins=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n **kwargs\n):\n \"\"\"Plot Posterior densities in the style of John K. Kruschke's book.\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names: list of variable names\n Variables to be plotted, two variables are required. Prefix the variables by `~`\n when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n `pandas.filter`.\n transform: callable\n Function to transform data (defaults to None i.e.the identity function)\n coords: mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n grid : tuple\n Number of rows and columns. Defaults to None, the rows and columns are\n automatically inferred.\n figsize: tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n hdi_prob: float, optional\n Plots highest density interval for chosen percentage of density.\n Use 'hide' to hide the highest density interval. Defaults to 0.94.\n multimodal: bool\n If true (default) it may compute more than one credible interval if the distribution is\n multimodal and the modes are well separated.\n skipna : bool\n If true ignores nan values when computing the hdi and point estimates. Defaults to false.\n round_to: int, optional\n Controls formatting of floats. 
Defaults to 2 or the integer part, whichever is bigger.\n point_estimate: Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in rcParams.\n group: str, optional\n Specifies which InferenceData group should be plotted. Defaults to \u2018posterior\u2019.\n rope: tuple or dictionary of tuples\n Lower and upper values of the Region Of Practical Equivalence. If a list is provided, its\n length should match the number of variables.\n ref_val: float or dictionary of floats\n display the percentage below and above the values in ref_val. Must be None (default),\n a constant, a list or a dictionary like see an example below. If a list is provided, its\n length should match the number of variables.\n kind: str\n Type of plot to display (kde or hist) For discrete variables this argument is ignored and\n a histogram is always used.\n bw: float or str, optional\n If numeric, indicates the bandwidth and must be positive.\n If str, indicates the method to estimate the bandwidth and must be\n one of \"scott\", \"silverman\", \"isj\" or \"experimental\" when `circular` is False\n and \"taylor\" (for now) when `circular` is True.\n Defaults to \"default\" which means \"experimental\" when variable is not circular\n and \"taylor\" when it is. Only works if `kind == kde`.\n circular: bool, optional\n If True, it interprets the values passed are from a circular variable measured in radians\n and a circular KDE is used. Only valid for 1D KDE. Defaults to False.\n Only works if `kind == kde`.\n bins: integer or sequence or 'auto', optional\n Controls the number of bins, accepts the same keywords `matplotlib.hist()` does. Only works\n if `kind == hist`. If None (default) it will use `auto` for continuous variables and\n `range(xmin, xmax + 1)` for discrete variables.\n ax: numpy array-like of matplotlib axes or bokeh figures, optional\n A 2D array of locations into which to plot the densities. If not supplied, Arviz will create\n its own array of plot areas (and return it).\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show: bool, optional\n Call backend show function.\n **kwargs\n Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.\n\n Returns\n -------\n axes: matplotlib axes or bokeh figures\n\n Examples\n --------\n Show a default kernel density plot following style of John Kruschke\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('centered_eight')\n >>> az.plot_posterior(data)\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'])\n\n Plot Region of Practical Equivalence (rope) and select variables with regular expressions\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', '^the'], filter_vars=\"regex\", rope=(-1, 1))\n\n Plot Region of Practical Equivalence for selected distributions\n\n .. plot::\n :context: close-figs\n\n >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)\n\n\n Add reference lines\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], ref_val=0)\n\n Show point estimate of distribution\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], point_estimate='mode')\n\n Show reference values using variable names and coordinates\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val= {\"theta\": [{\"school\": \"Deerfield\", \"ref_val\": 4},\n ... {\"school\": \"Choate\", \"ref_val\": 3}]})\n\n Show reference values using a list\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val=[1] + [5] * 8 + [1])\n\n\n Plot posterior as a histogram\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], kind='hist')\n\n Change size of highest density interval\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], hdi_prob=.75)\n \"\"\"\n data = convert_to_dataset(data, group=group)\n if transform is not None:\n data = transform(data)\n var_names = _var_names(var_names, data, filter_vars)\n\n if coords is None:\n coords = {}\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n elif hdi_prob not in (None, \"hide\"):\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n if point_estimate == \"auto\":\n point_estimate = rcParams[\"plot.point_estimate\"]\n elif point_estimate not in {\"mean\", \"median\", \"mode\", None}:\n raise ValueError(\"The value of point_estimate must be either mean, median, mode or None.\")\n\n plotters = filter_plotters_list(\n list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),\n \"plot_posterior\",\n )\n length_plotters = len(plotters)\n rows, cols = default_grid(length_plotters, grid=grid)\n\n posteriorplot_kwargs = dict(\n ax=ax,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n figsize=figsize,\n plotters=plotters,\n bw=bw,\n circular=circular,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n hdi_prob=hdi_prob,\n multimodal=multimodal,\n skipna=skipna,\n textsize=textsize,\n ref_val=ref_val,\n rope=rope,\n kwargs=kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_posterior\", \"posteriorplot\", backend)\n ax = plot(**posteriorplot_kwargs)\n return ax\n", "path": "arviz/plots/posteriorplot.py"}], "after_files": [{"content": "\"\"\"Plot posterior densities.\"\"\"\nfrom ..data import convert_to_dataset\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\nfrom .plot_utils import default_grid, filter_plotters_list, get_plotting_function, xarray_var_iter\n\n\ndef plot_posterior(\n data,\n var_names=None,\n filter_vars=None,\n transform=None,\n coords=None,\n grid=None,\n figsize=None,\n textsize=None,\n hdi_prob=None,\n multimodal=False,\n skipna=False,\n round_to=None,\n point_estimate=\"auto\",\n group=\"posterior\",\n rope=None,\n ref_val=None,\n kind=\"kde\",\n bw=\"default\",\n circular=False,\n bins=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n **kwargs\n):\n \"\"\"Plot Posterior densities in the style of John K. 
Kruschke's book.\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names: list of variable names\n Variables to be plotted, two variables are required. Prefix the variables by `~`\n when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n `pandas.filter`.\n transform: callable\n Function to transform data (defaults to None i.e.the identity function)\n coords: mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n grid : tuple\n Number of rows and columns. Defaults to None, the rows and columns are\n automatically inferred.\n figsize: tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n hdi_prob: float, optional\n Plots highest density interval for chosen percentage of density.\n Use 'hide' to hide the highest density interval. Defaults to 0.94.\n multimodal: bool\n If true (default) it may compute more than one credible interval if the distribution is\n multimodal and the modes are well separated.\n skipna : bool\n If true ignores nan values when computing the hdi and point estimates. Defaults to false.\n round_to: int, optional\n Controls formatting of floats. Defaults to 2 or the integer part, whichever is bigger.\n point_estimate: Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in rcParams.\n group: str, optional\n Specifies which InferenceData group should be plotted. Defaults to \u2018posterior\u2019.\n rope: tuple or dictionary of tuples\n Lower and upper values of the Region Of Practical Equivalence. If a list is provided, its\n length should match the number of variables.\n ref_val: float or dictionary of floats\n display the percentage below and above the values in ref_val. Must be None (default),\n a constant, a list or a dictionary like see an example below. If a list is provided, its\n length should match the number of variables.\n kind: str\n Type of plot to display (kde or hist) For discrete variables this argument is ignored and\n a histogram is always used.\n bw: float or str, optional\n If numeric, indicates the bandwidth and must be positive.\n If str, indicates the method to estimate the bandwidth and must be\n one of \"scott\", \"silverman\", \"isj\" or \"experimental\" when `circular` is False\n and \"taylor\" (for now) when `circular` is True.\n Defaults to \"default\" which means \"experimental\" when variable is not circular\n and \"taylor\" when it is. Only works if `kind == kde`.\n circular: bool, optional\n If True, it interprets the values passed are from a circular variable measured in radians\n and a circular KDE is used. Only valid for 1D KDE. Defaults to False.\n Only works if `kind == kde`.\n bins: integer or sequence or 'auto', optional\n Controls the number of bins, accepts the same keywords `matplotlib.hist()` does. Only works\n if `kind == hist`. 
If None (default) it will use `auto` for continuous variables and\n `range(xmin, xmax + 1)` for discrete variables.\n ax: numpy array-like of matplotlib axes or bokeh figures, optional\n A 2D array of locations into which to plot the densities. If not supplied, Arviz will create\n its own array of plot areas (and return it).\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show: bool, optional\n Call backend show function.\n **kwargs\n Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.\n\n Returns\n -------\n axes: matplotlib axes or bokeh figures\n\n Examples\n --------\n Show a default kernel density plot following style of John Kruschke\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('centered_eight')\n >>> az.plot_posterior(data)\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'])\n\n Plot Region of Practical Equivalence (rope) and select variables with regular expressions\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', '^the'], filter_vars=\"regex\", rope=(-1, 1))\n\n Plot Region of Practical Equivalence for selected distributions\n\n .. plot::\n :context: close-figs\n\n >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)\n\n Using `coords` argument to plot only a subset of data\n\n .. plot::\n :context: close-figs\n\n >>> coords = {\"school\": [\"Choate\",\"Phillips Exeter\"]}\n >>> az.plot_posterior(data, var_names=[\"mu\", \"theta\"], coords=coords)\n\n Add reference lines\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], ref_val=0)\n\n Show point estimate of distribution\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], point_estimate='mode')\n\n Show reference values using variable names and coordinates\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val= {\"theta\": [{\"school\": \"Deerfield\", \"ref_val\": 4},\n ... {\"school\": \"Choate\", \"ref_val\": 3}]})\n\n Show reference values using a list\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val=[1] + [5] * 8 + [1])\n\n\n Plot posterior as a histogram\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], kind='hist')\n\n Change size of highest density interval\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], hdi_prob=.75)\n \"\"\"\n data = convert_to_dataset(data, group=group)\n if transform is not None:\n data = transform(data)\n var_names = _var_names(var_names, data, filter_vars)\n\n if coords is None:\n coords = {}\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n elif hdi_prob not in (None, \"hide\"):\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n if point_estimate == \"auto\":\n point_estimate = rcParams[\"plot.point_estimate\"]\n elif point_estimate not in {\"mean\", \"median\", \"mode\", None}:\n raise ValueError(\"The value of point_estimate must be either mean, median, mode or None.\")\n\n plotters = filter_plotters_list(\n list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),\n \"plot_posterior\",\n )\n length_plotters = len(plotters)\n rows, cols = default_grid(length_plotters, grid=grid)\n\n posteriorplot_kwargs = dict(\n ax=ax,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n figsize=figsize,\n plotters=plotters,\n bw=bw,\n circular=circular,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n hdi_prob=hdi_prob,\n multimodal=multimodal,\n skipna=skipna,\n textsize=textsize,\n ref_val=ref_val,\n rope=rope,\n kwargs=kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_posterior\", \"posteriorplot\", backend)\n ax = plot(**posteriorplot_kwargs)\n return ax\n", "path": "arviz/plots/posteriorplot.py"}]} | 3,277 | 196 |
gh_patches_debug_16858 | rasdani/github-patches | git_diff | cupy__cupy-2078 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cupy.ascontiguousarray behavior different from numpy
```py
>>> np.ascontiguousarray(np.array(3))
array([3])
>>> cp.ascontiguousarray(cp.array(3))
array(3)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/core/__init__.py`
Content:
```
1 from cupy.core import core # NOQA
2 from cupy.core import internal # NOQA
3
4
5 # import class and function
6 from cupy.core._errors import _AxisError # NOQA
7 from cupy.core._kernel import create_reduction_func # NOQA
8 from cupy.core._kernel import create_ufunc # NOQA
9 from cupy.core._kernel import ElementwiseKernel # NOQA
10 from cupy.core._kernel import ReductionKernel # NOQA
11 from cupy.core._kernel import ufunc # NOQA
12 from cupy.core._routines_manipulation import array_split # NOQA
13 from cupy.core._routines_manipulation import broadcast # NOQA
14 from cupy.core._routines_manipulation import broadcast_to # NOQA
15 from cupy.core._routines_manipulation import concatenate_method # NOQA
16 from cupy.core._routines_manipulation import moveaxis # NOQA
17 from cupy.core._routines_manipulation import rollaxis # NOQA
18 from cupy.core._routines_manipulation import size # NOQA'
19 from cupy.core._routines_math import absolute # NOQA
20 from cupy.core._routines_math import add # NOQA
21 from cupy.core._routines_math import angle # NOQA
22 from cupy.core._routines_math import conj # NOQA
23 from cupy.core._routines_math import divide # NOQA
24 from cupy.core._routines_math import floor_divide # NOQA
25 from cupy.core._routines_math import imag # NOQA
26 from cupy.core._routines_math import multiply # NOQA
27 from cupy.core._routines_math import negative # NOQA
28 from cupy.core._routines_math import power # NOQA
29 from cupy.core._routines_math import real # NOQA
30 from cupy.core._routines_math import remainder # NOQA
31 from cupy.core._routines_math import sqrt # NOQA
32 from cupy.core._routines_math import subtract # NOQA
33 from cupy.core._routines_math import true_divide # NOQA
34 from cupy.core._routines_statistics import nanmax # NOQA
35 from cupy.core._routines_statistics import nanmin # NOQA
36 from cupy.core.core import array # NOQA
37 from cupy.core.core import ascontiguousarray # NOQA
38 from cupy.core.core import asfortranarray # NOQA
39 from cupy.core.core import bitwise_and # NOQA
40 from cupy.core.core import bitwise_or # NOQA
41 from cupy.core.core import bitwise_xor # NOQA
42 from cupy.core.core import create_comparison # NOQA
43 from cupy.core.core import divmod # NOQA
44 from cupy.core.core import dot # NOQA
45 from cupy.core.core import elementwise_copy # NOQA
46 from cupy.core.core import elementwise_copy_where # NOQA
47 from cupy.core.core import equal # NOQA
48 from cupy.core.core import greater # NOQA
49 from cupy.core.core import greater_equal # NOQA
50 from cupy.core.core import invert # NOQA
51 from cupy.core.core import left_shift # NOQA
52 from cupy.core.core import less # NOQA
53 from cupy.core.core import less_equal # NOQA
54 from cupy.core.core import matmul # NOQA
55 from cupy.core.core import ndarray # NOQA
56 from cupy.core.core import not_equal # NOQA
57 from cupy.core.core import right_shift # NOQA
58 from cupy.core.core import tensordot_core # NOQA
59 from cupy.core.dlpack import fromDlpack # NOQA
60 from cupy.core.internal import complete_slice # NOQA
61 from cupy.core.internal import get_size # NOQA
62 from cupy.core.raw import RawKernel # NOQA
63
```
Path: `cupy/statistics/order.py`
Content:
```
1 import warnings
2
3 import cupy
4 from cupy import core
5 from cupy.core import _routines_statistics as _statistics
6 from cupy.core import fusion
7 from cupy.logic import content
8
9
10 def amin(a, axis=None, out=None, keepdims=False, dtype=None):
11 """Returns the minimum of an array or the minimum along an axis.
12
13 .. note::
14
15 When at least one element is NaN, the corresponding min value will be
16 NaN.
17
18 Args:
19 a (cupy.ndarray): Array to take the minimum.
20 axis (int): Along which axis to take the minimum. The flattened array
21 is used by default.
22 out (cupy.ndarray): Output array.
23 keepdims (bool): If ``True``, the axis is remained as an axis of
24 size one.
25 dtype: Data type specifier.
26
27 Returns:
28 cupy.ndarray: The minimum of ``a``, along the axis if specified.
29
30 .. seealso:: :func:`numpy.amin`
31
32 """
33 if fusion._is_fusing():
34 if keepdims:
35 raise NotImplementedError(
36 'cupy.amin does not support `keepdims` in fusion yet.')
37 return fusion._call_reduction(_statistics.amin,
38 a, axis=axis, dtype=dtype, out=out)
39
40 # TODO(okuta): check type
41 return a.min(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
42
43
44 def amax(a, axis=None, out=None, keepdims=False, dtype=None):
45 """Returns the maximum of an array or the maximum along an axis.
46
47 .. note::
48
49 When at least one element is NaN, the corresponding min value will be
50 NaN.
51
52 Args:
53 a (cupy.ndarray): Array to take the maximum.
54 axis (int): Along which axis to take the maximum. The flattened array
55 is used by default.
56 out (cupy.ndarray): Output array.
57 keepdims (bool): If ``True``, the axis is remained as an axis of
58 size one.
59 dtype: Data type specifier.
60
61 Returns:
62 cupy.ndarray: The maximum of ``a``, along the axis if specified.
63
64 .. seealso:: :func:`numpy.amax`
65
66 """
67 if fusion._is_fusing():
68 if keepdims:
69 raise NotImplementedError(
70 'cupy.amax does not support `keepdims` in fusion yet.')
71 return fusion._call_reduction(_statistics.amax,
72 a, axis=axis, dtype=dtype, out=out)
73
74 # TODO(okuta): check type
75 return a.max(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
76
77
78 def nanmin(a, axis=None, out=None, keepdims=False):
79 """Returns the minimum of an array along an axis ignoring NaN.
80
81 When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`
82 is raised and NaN is returned.
83
84 Args:
85 a (cupy.ndarray): Array to take the minimum.
86 axis (int): Along which axis to take the minimum. The flattened array
87 is used by default.
88 out (cupy.ndarray): Output array.
89 keepdims (bool): If ``True``, the axis is remained as an axis of
90 size one.
91
92 Returns:
93 cupy.ndarray: The minimum of ``a``, along the axis if specified.
94
95 .. seealso:: :func:`numpy.nanmin`
96
97 """
98 res = core.nanmin(a, axis=axis, out=out, keepdims=keepdims)
99 if content.isnan(res).any():
100 warnings.warn('All-NaN slice encountered', RuntimeWarning)
101 return res
102
103
104 def nanmax(a, axis=None, out=None, keepdims=False):
105 """Returns the maximum of an array along an axis ignoring NaN.
106
107 When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`
108 is raised and NaN is returned.
109
110 Args:
111 a (cupy.ndarray): Array to take the maximum.
112 axis (int): Along which axis to take the maximum. The flattened array
113 is used by default.
114 out (cupy.ndarray): Output array.
115 keepdims (bool): If ``True``, the axis is remained as an axis of
116 size one.
117
118 Returns:
119 cupy.ndarray: The maximum of ``a``, along the axis if specified.
120
121 .. seealso:: :func:`numpy.nanmax`
122
123 """
124 res = core.nanmax(a, axis=axis, out=out, keepdims=keepdims)
125 if content.isnan(res).any():
126 warnings.warn('All-NaN slice encountered', RuntimeWarning)
127 return res
128
129
130 # TODO(okuta): Implement ptp
131
132
133 def percentile(a, q, axis=None, out=None, interpolation='linear',
134 keepdims=False):
135 """Computes the q-th percentile of the data along the specified axis.
136
137 Args:
138 a (cupy.ndarray): Array for which to compute percentiles.
139 q (float, tuple of floats or cupy.ndarray): Percentiles to compute
140 in the range between 0 and 100 inclusive.
141 axis (int or tuple of ints): Along which axis or axes to compute the
142 percentiles. The flattened array is used by default.
143 out (cupy.ndarray): Output array.
144 interpolation (str): Interpolation method when a quantile lies between
145 two data points. ``linear`` interpolation is used by default.
146 Supported interpolations are``lower``, ``higher``, ``midpoint``,
147 ``nearest`` and ``linear``.
148 keepdims (bool): If ``True``, the axis is remained as an axis of
149 size one.
150
151 Returns:
152 cupy.ndarray: The percentiles of ``a``, along the axis if specified.
153
154 .. seealso:: :func:`numpy.percentile`
155
156 """
157 q = cupy.asarray(q, dtype=a.dtype)
158 if q.ndim == 0:
159 q = q[None]
160 zerod = True
161 else:
162 zerod = False
163 if q.ndim > 1:
164 raise ValueError('Expected q to have a dimension of 1.\n'
165 'Actual: {0} != 1'.format(q.ndim))
166
167 if keepdims:
168 if axis is None:
169 keepdim = (1,) * a.ndim
170 else:
171 keepdim = list(a.shape)
172 for ax in axis:
173 keepdim[ax % a.ndim] = 1
174 keepdim = tuple(keepdim)
175
176 # Copy a since we need it sorted but without modifying the original array
177 if isinstance(axis, int):
178 axis = axis,
179 if axis is None:
180 ap = a.flatten()
181 nkeep = 0
182 else:
183 # Reduce axes from a and put them last
184 axis = tuple(ax % a.ndim for ax in axis)
185 keep = set(range(a.ndim)) - set(axis)
186 nkeep = len(keep)
187 for i, s in enumerate(sorted(keep)):
188 a = a.swapaxes(i, s)
189 ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()
190
191 axis = -1
192 ap.sort(axis=axis)
193 Nx = ap.shape[axis]
194 indices = q * 0.01 * (Nx - 1.) # percents to decimals
195
196 if interpolation == 'lower':
197 indices = cupy.floor(indices).astype(cupy.int32)
198 elif interpolation == 'higher':
199 indices = cupy.ceil(indices).astype(cupy.int32)
200 elif interpolation == 'midpoint':
201 indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))
202 elif interpolation == 'nearest':
203 # TODO(hvy): Implement nearest using around
204 raise ValueError('\'nearest\' interpolation is not yet supported. '
205 'Please use any other interpolation method.')
206 elif interpolation == 'linear':
207 pass
208 else:
209 raise ValueError('Unexpected interpolation method.\n'
210 'Actual: \'{0}\' not in (\'linear\', \'lower\', '
211 '\'higher\', \'midpoint\')'.format(interpolation))
212
213 if indices.dtype == cupy.int32:
214 ret = cupy.rollaxis(ap, axis)
215 ret = ret.take(indices, axis=0, out=out)
216 else:
217 if out is None:
218 ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)
219 else:
220 ret = cupy.rollaxis(out, 0, out.ndim)
221
222 cupy.ElementwiseKernel(
223 'S idx, raw T a, raw int32 offset', 'U ret',
224 '''
225 ptrdiff_t idx_below = floor(idx);
226 U weight_above = idx - idx_below;
227
228 ptrdiff_t offset_i = _ind.get()[0] * offset;
229 ret = a[offset_i + idx_below] * (1.0 - weight_above)
230 + a[offset_i + idx_below + 1] * weight_above;
231 ''',
232 'percentile_weightnening'
233 )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)
234 ret = cupy.rollaxis(ret, -1) # Roll q dimension back to first axis
235
236 if zerod:
237 ret = ret.squeeze(0)
238 if keepdims:
239 if q.size > 1:
240 keepdim = (-1,) + keepdim
241 ret = ret.reshape(keepdim)
242
243 return cupy.ascontiguousarray(ret)
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/core/__init__.py b/cupy/core/__init__.py
--- a/cupy/core/__init__.py
+++ b/cupy/core/__init__.py
@@ -33,6 +33,8 @@
from cupy.core._routines_math import true_divide # NOQA
from cupy.core._routines_statistics import nanmax # NOQA
from cupy.core._routines_statistics import nanmin # NOQA
+from cupy.core.core import _internal_ascontiguousarray # NOQA
+from cupy.core.core import _internal_asfortranarray # NOQA
from cupy.core.core import array # NOQA
from cupy.core.core import ascontiguousarray # NOQA
from cupy.core.core import asfortranarray # NOQA
diff --git a/cupy/statistics/order.py b/cupy/statistics/order.py
--- a/cupy/statistics/order.py
+++ b/cupy/statistics/order.py
@@ -240,4 +240,4 @@
keepdim = (-1,) + keepdim
ret = ret.reshape(keepdim)
- return cupy.ascontiguousarray(ret)
+ return core._internal_ascontiguousarray(ret)
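
The hunks above only export the internal helpers and re-route `percentile`; the 0-d promotion itself presumably lands in `cupy/core/core.pyx`, which is not shown in this diff. Below is a hedged sketch of the behaviour being targeted, with the expected outputs copied from the issue report rather than re-verified.

```python
# Sketch of the NumPy-compatible behaviour requested in the issue; expected
# outputs are taken from the issue report, not re-verified here.
import numpy as np
import cupy as cp

print(np.ascontiguousarray(np.array(3)))  # array([3]) -> 0-d input promoted to 1-d
print(cp.ascontiguousarray(cp.array(3)))  # should match NumPy after the fix: array([3])

# Internal callers such as cupy.percentile keep the old no-promotion semantics
# by calling core._internal_ascontiguousarray (see the order.py hunk above).
```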
| {"golden_diff": "diff --git a/cupy/core/__init__.py b/cupy/core/__init__.py\n--- a/cupy/core/__init__.py\n+++ b/cupy/core/__init__.py\n@@ -33,6 +33,8 @@\n from cupy.core._routines_math import true_divide # NOQA\n from cupy.core._routines_statistics import nanmax # NOQA\n from cupy.core._routines_statistics import nanmin # NOQA\n+from cupy.core.core import _internal_ascontiguousarray # NOQA\n+from cupy.core.core import _internal_asfortranarray # NOQA\n from cupy.core.core import array # NOQA\n from cupy.core.core import ascontiguousarray # NOQA\n from cupy.core.core import asfortranarray # NOQA\ndiff --git a/cupy/statistics/order.py b/cupy/statistics/order.py\n--- a/cupy/statistics/order.py\n+++ b/cupy/statistics/order.py\n@@ -240,4 +240,4 @@\n keepdim = (-1,) + keepdim\n ret = ret.reshape(keepdim)\n \n- return cupy.ascontiguousarray(ret)\n+ return core._internal_ascontiguousarray(ret)\n", "issue": "cupy.ascontiguousarray behavior different from numpy\n```py\r\n>>> np.ascontiguousarray(np.array(3))\r\narray([3])\r\n>>> cp.ascontiguousarray(cp.array(3))\r\narray(3)\r\n```\n", "before_files": [{"content": "from cupy.core import core # NOQA\nfrom cupy.core import internal # NOQA\n\n\n# import class and function\nfrom cupy.core._errors import _AxisError # NOQA\nfrom cupy.core._kernel import create_reduction_func # NOQA\nfrom cupy.core._kernel import create_ufunc # NOQA\nfrom cupy.core._kernel import ElementwiseKernel # NOQA\nfrom cupy.core._kernel import ReductionKernel # NOQA\nfrom cupy.core._kernel import ufunc # NOQA\nfrom cupy.core._routines_manipulation import array_split # NOQA\nfrom cupy.core._routines_manipulation import broadcast # NOQA\nfrom cupy.core._routines_manipulation import broadcast_to # NOQA\nfrom cupy.core._routines_manipulation import concatenate_method # NOQA\nfrom cupy.core._routines_manipulation import moveaxis # NOQA\nfrom cupy.core._routines_manipulation import rollaxis # NOQA\nfrom cupy.core._routines_manipulation import size # NOQA'\nfrom cupy.core._routines_math import absolute # NOQA\nfrom cupy.core._routines_math import add # NOQA\nfrom cupy.core._routines_math import angle # NOQA\nfrom cupy.core._routines_math import conj # NOQA\nfrom cupy.core._routines_math import divide # NOQA\nfrom cupy.core._routines_math import floor_divide # NOQA\nfrom cupy.core._routines_math import imag # NOQA\nfrom cupy.core._routines_math import multiply # NOQA\nfrom cupy.core._routines_math import negative # NOQA\nfrom cupy.core._routines_math import power # NOQA\nfrom cupy.core._routines_math import real # NOQA\nfrom cupy.core._routines_math import remainder # NOQA\nfrom cupy.core._routines_math import sqrt # NOQA\nfrom cupy.core._routines_math import subtract # NOQA\nfrom cupy.core._routines_math import true_divide # NOQA\nfrom cupy.core._routines_statistics import nanmax # NOQA\nfrom cupy.core._routines_statistics import nanmin # NOQA\nfrom cupy.core.core import array # NOQA\nfrom cupy.core.core import ascontiguousarray # NOQA\nfrom cupy.core.core import asfortranarray # NOQA\nfrom cupy.core.core import bitwise_and # NOQA\nfrom cupy.core.core import bitwise_or # NOQA\nfrom cupy.core.core import bitwise_xor # NOQA\nfrom cupy.core.core import create_comparison # NOQA\nfrom cupy.core.core import divmod # NOQA\nfrom cupy.core.core import dot # NOQA\nfrom cupy.core.core import elementwise_copy # NOQA\nfrom cupy.core.core import elementwise_copy_where # NOQA\nfrom cupy.core.core import equal # NOQA\nfrom cupy.core.core import greater # NOQA\nfrom cupy.core.core import greater_equal # 
NOQA\nfrom cupy.core.core import invert # NOQA\nfrom cupy.core.core import left_shift # NOQA\nfrom cupy.core.core import less # NOQA\nfrom cupy.core.core import less_equal # NOQA\nfrom cupy.core.core import matmul # NOQA\nfrom cupy.core.core import ndarray # NOQA\nfrom cupy.core.core import not_equal # NOQA\nfrom cupy.core.core import right_shift # NOQA\nfrom cupy.core.core import tensordot_core # NOQA\nfrom cupy.core.dlpack import fromDlpack # NOQA\nfrom cupy.core.internal import complete_slice # NOQA\nfrom cupy.core.internal import get_size # NOQA\nfrom cupy.core.raw import RawKernel # NOQA\n", "path": "cupy/core/__init__.py"}, {"content": "import warnings\n\nimport cupy\nfrom cupy import core\nfrom cupy.core import _routines_statistics as _statistics\nfrom cupy.core import fusion\nfrom cupy.logic import content\n\n\ndef amin(a, axis=None, out=None, keepdims=False, dtype=None):\n \"\"\"Returns the minimum of an array or the minimum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n if fusion._is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amin does not support `keepdims` in fusion yet.')\n return fusion._call_reduction(_statistics.amin,\n a, axis=axis, dtype=dtype, out=out)\n\n # TODO(okuta): check type\n return a.min(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\ndef amax(a, axis=None, out=None, keepdims=False, dtype=None):\n \"\"\"Returns the maximum of an array or the maximum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amax`\n\n \"\"\"\n if fusion._is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amax does not support `keepdims` in fusion yet.')\n return fusion._call_reduction(_statistics.amax,\n a, axis=axis, dtype=dtype, out=out)\n\n # TODO(okuta): check type\n return a.max(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\ndef nanmin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. 
seealso:: :func:`numpy.nanmin`\n\n \"\"\"\n res = core.nanmin(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any():\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef nanmax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.nanmax`\n\n \"\"\"\n res = core.nanmax(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any():\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\n# TODO(okuta): Implement ptp\n\n\ndef percentile(a, q, axis=None, out=None, interpolation='linear',\n keepdims=False):\n \"\"\"Computes the q-th percentile of the data along the specified axis.\n\n Args:\n a (cupy.ndarray): Array for which to compute percentiles.\n q (float, tuple of floats or cupy.ndarray): Percentiles to compute\n in the range between 0 and 100 inclusive.\n axis (int or tuple of ints): Along which axis or axes to compute the\n percentiles. The flattened array is used by default.\n out (cupy.ndarray): Output array.\n interpolation (str): Interpolation method when a quantile lies between\n two data points. ``linear`` interpolation is used by default.\n Supported interpolations are``lower``, ``higher``, ``midpoint``,\n ``nearest`` and ``linear``.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The percentiles of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.percentile`\n\n \"\"\"\n q = cupy.asarray(q, dtype=a.dtype)\n if q.ndim == 0:\n q = q[None]\n zerod = True\n else:\n zerod = False\n if q.ndim > 1:\n raise ValueError('Expected q to have a dimension of 1.\\n'\n 'Actual: {0} != 1'.format(q.ndim))\n\n if keepdims:\n if axis is None:\n keepdim = (1,) * a.ndim\n else:\n keepdim = list(a.shape)\n for ax in axis:\n keepdim[ax % a.ndim] = 1\n keepdim = tuple(keepdim)\n\n # Copy a since we need it sorted but without modifying the original array\n if isinstance(axis, int):\n axis = axis,\n if axis is None:\n ap = a.flatten()\n nkeep = 0\n else:\n # Reduce axes from a and put them last\n axis = tuple(ax % a.ndim for ax in axis)\n keep = set(range(a.ndim)) - set(axis)\n nkeep = len(keep)\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()\n\n axis = -1\n ap.sort(axis=axis)\n Nx = ap.shape[axis]\n indices = q * 0.01 * (Nx - 1.) # percents to decimals\n\n if interpolation == 'lower':\n indices = cupy.floor(indices).astype(cupy.int32)\n elif interpolation == 'higher':\n indices = cupy.ceil(indices).astype(cupy.int32)\n elif interpolation == 'midpoint':\n indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))\n elif interpolation == 'nearest':\n # TODO(hvy): Implement nearest using around\n raise ValueError('\\'nearest\\' interpolation is not yet supported. 
'\n 'Please use any other interpolation method.')\n elif interpolation == 'linear':\n pass\n else:\n raise ValueError('Unexpected interpolation method.\\n'\n 'Actual: \\'{0}\\' not in (\\'linear\\', \\'lower\\', '\n '\\'higher\\', \\'midpoint\\')'.format(interpolation))\n\n if indices.dtype == cupy.int32:\n ret = cupy.rollaxis(ap, axis)\n ret = ret.take(indices, axis=0, out=out)\n else:\n if out is None:\n ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)\n else:\n ret = cupy.rollaxis(out, 0, out.ndim)\n\n cupy.ElementwiseKernel(\n 'S idx, raw T a, raw int32 offset', 'U ret',\n '''\n ptrdiff_t idx_below = floor(idx);\n U weight_above = idx - idx_below;\n\n ptrdiff_t offset_i = _ind.get()[0] * offset;\n ret = a[offset_i + idx_below] * (1.0 - weight_above)\n + a[offset_i + idx_below + 1] * weight_above;\n ''',\n 'percentile_weightnening'\n )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)\n ret = cupy.rollaxis(ret, -1) # Roll q dimension back to first axis\n\n if zerod:\n ret = ret.squeeze(0)\n if keepdims:\n if q.size > 1:\n keepdim = (-1,) + keepdim\n ret = ret.reshape(keepdim)\n\n return cupy.ascontiguousarray(ret)\n", "path": "cupy/statistics/order.py"}], "after_files": [{"content": "from cupy.core import core # NOQA\nfrom cupy.core import internal # NOQA\n\n\n# import class and function\nfrom cupy.core._errors import _AxisError # NOQA\nfrom cupy.core._kernel import create_reduction_func # NOQA\nfrom cupy.core._kernel import create_ufunc # NOQA\nfrom cupy.core._kernel import ElementwiseKernel # NOQA\nfrom cupy.core._kernel import ReductionKernel # NOQA\nfrom cupy.core._kernel import ufunc # NOQA\nfrom cupy.core._routines_manipulation import array_split # NOQA\nfrom cupy.core._routines_manipulation import broadcast # NOQA\nfrom cupy.core._routines_manipulation import broadcast_to # NOQA\nfrom cupy.core._routines_manipulation import concatenate_method # NOQA\nfrom cupy.core._routines_manipulation import moveaxis # NOQA\nfrom cupy.core._routines_manipulation import rollaxis # NOQA\nfrom cupy.core._routines_manipulation import size # NOQA'\nfrom cupy.core._routines_math import absolute # NOQA\nfrom cupy.core._routines_math import add # NOQA\nfrom cupy.core._routines_math import angle # NOQA\nfrom cupy.core._routines_math import conj # NOQA\nfrom cupy.core._routines_math import divide # NOQA\nfrom cupy.core._routines_math import floor_divide # NOQA\nfrom cupy.core._routines_math import imag # NOQA\nfrom cupy.core._routines_math import multiply # NOQA\nfrom cupy.core._routines_math import negative # NOQA\nfrom cupy.core._routines_math import power # NOQA\nfrom cupy.core._routines_math import real # NOQA\nfrom cupy.core._routines_math import remainder # NOQA\nfrom cupy.core._routines_math import sqrt # NOQA\nfrom cupy.core._routines_math import subtract # NOQA\nfrom cupy.core._routines_math import true_divide # NOQA\nfrom cupy.core._routines_statistics import nanmax # NOQA\nfrom cupy.core._routines_statistics import nanmin # NOQA\nfrom cupy.core.core import _internal_ascontiguousarray # NOQA\nfrom cupy.core.core import _internal_asfortranarray # NOQA\nfrom cupy.core.core import array # NOQA\nfrom cupy.core.core import ascontiguousarray # NOQA\nfrom cupy.core.core import asfortranarray # NOQA\nfrom cupy.core.core import bitwise_and # NOQA\nfrom cupy.core.core import bitwise_or # NOQA\nfrom cupy.core.core import bitwise_xor # NOQA\nfrom cupy.core.core import create_comparison # NOQA\nfrom cupy.core.core import divmod # NOQA\nfrom cupy.core.core import dot # NOQA\nfrom 
cupy.core.core import elementwise_copy # NOQA\nfrom cupy.core.core import elementwise_copy_where # NOQA\nfrom cupy.core.core import equal # NOQA\nfrom cupy.core.core import greater # NOQA\nfrom cupy.core.core import greater_equal # NOQA\nfrom cupy.core.core import invert # NOQA\nfrom cupy.core.core import left_shift # NOQA\nfrom cupy.core.core import less # NOQA\nfrom cupy.core.core import less_equal # NOQA\nfrom cupy.core.core import matmul # NOQA\nfrom cupy.core.core import ndarray # NOQA\nfrom cupy.core.core import not_equal # NOQA\nfrom cupy.core.core import right_shift # NOQA\nfrom cupy.core.core import tensordot_core # NOQA\nfrom cupy.core.dlpack import fromDlpack # NOQA\nfrom cupy.core.internal import complete_slice # NOQA\nfrom cupy.core.internal import get_size # NOQA\nfrom cupy.core.raw import RawKernel # NOQA\n", "path": "cupy/core/__init__.py"}, {"content": "import warnings\n\nimport cupy\nfrom cupy import core\nfrom cupy.core import _routines_statistics as _statistics\nfrom cupy.core import fusion\nfrom cupy.logic import content\n\n\ndef amin(a, axis=None, out=None, keepdims=False, dtype=None):\n \"\"\"Returns the minimum of an array or the minimum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n if fusion._is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amin does not support `keepdims` in fusion yet.')\n return fusion._call_reduction(_statistics.amin,\n a, axis=axis, dtype=dtype, out=out)\n\n # TODO(okuta): check type\n return a.min(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\ndef amax(a, axis=None, out=None, keepdims=False, dtype=None):\n \"\"\"Returns the maximum of an array or the maximum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amax`\n\n \"\"\"\n if fusion._is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amax does not support `keepdims` in fusion yet.')\n return fusion._call_reduction(_statistics.amax,\n a, axis=axis, dtype=dtype, out=out)\n\n # TODO(okuta): check type\n return a.max(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\ndef nanmin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. 
The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.nanmin`\n\n \"\"\"\n res = core.nanmin(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any():\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef nanmax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.nanmax`\n\n \"\"\"\n res = core.nanmax(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any():\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\n# TODO(okuta): Implement ptp\n\n\ndef percentile(a, q, axis=None, out=None, interpolation='linear',\n keepdims=False):\n \"\"\"Computes the q-th percentile of the data along the specified axis.\n\n Args:\n a (cupy.ndarray): Array for which to compute percentiles.\n q (float, tuple of floats or cupy.ndarray): Percentiles to compute\n in the range between 0 and 100 inclusive.\n axis (int or tuple of ints): Along which axis or axes to compute the\n percentiles. The flattened array is used by default.\n out (cupy.ndarray): Output array.\n interpolation (str): Interpolation method when a quantile lies between\n two data points. ``linear`` interpolation is used by default.\n Supported interpolations are``lower``, ``higher``, ``midpoint``,\n ``nearest`` and ``linear``.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The percentiles of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.percentile`\n\n \"\"\"\n q = cupy.asarray(q, dtype=a.dtype)\n if q.ndim == 0:\n q = q[None]\n zerod = True\n else:\n zerod = False\n if q.ndim > 1:\n raise ValueError('Expected q to have a dimension of 1.\\n'\n 'Actual: {0} != 1'.format(q.ndim))\n\n if keepdims:\n if axis is None:\n keepdim = (1,) * a.ndim\n else:\n keepdim = list(a.shape)\n for ax in axis:\n keepdim[ax % a.ndim] = 1\n keepdim = tuple(keepdim)\n\n # Copy a since we need it sorted but without modifying the original array\n if isinstance(axis, int):\n axis = axis,\n if axis is None:\n ap = a.flatten()\n nkeep = 0\n else:\n # Reduce axes from a and put them last\n axis = tuple(ax % a.ndim for ax in axis)\n keep = set(range(a.ndim)) - set(axis)\n nkeep = len(keep)\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()\n\n axis = -1\n ap.sort(axis=axis)\n Nx = ap.shape[axis]\n indices = q * 0.01 * (Nx - 1.) 
# percents to decimals\n\n if interpolation == 'lower':\n indices = cupy.floor(indices).astype(cupy.int32)\n elif interpolation == 'higher':\n indices = cupy.ceil(indices).astype(cupy.int32)\n elif interpolation == 'midpoint':\n indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))\n elif interpolation == 'nearest':\n # TODO(hvy): Implement nearest using around\n raise ValueError('\\'nearest\\' interpolation is not yet supported. '\n 'Please use any other interpolation method.')\n elif interpolation == 'linear':\n pass\n else:\n raise ValueError('Unexpected interpolation method.\\n'\n 'Actual: \\'{0}\\' not in (\\'linear\\', \\'lower\\', '\n '\\'higher\\', \\'midpoint\\')'.format(interpolation))\n\n if indices.dtype == cupy.int32:\n ret = cupy.rollaxis(ap, axis)\n ret = ret.take(indices, axis=0, out=out)\n else:\n if out is None:\n ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)\n else:\n ret = cupy.rollaxis(out, 0, out.ndim)\n\n cupy.ElementwiseKernel(\n 'S idx, raw T a, raw int32 offset', 'U ret',\n '''\n ptrdiff_t idx_below = floor(idx);\n U weight_above = idx - idx_below;\n\n ptrdiff_t offset_i = _ind.get()[0] * offset;\n ret = a[offset_i + idx_below] * (1.0 - weight_above)\n + a[offset_i + idx_below + 1] * weight_above;\n ''',\n 'percentile_weightnening'\n )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)\n ret = cupy.rollaxis(ret, -1) # Roll q dimension back to first axis\n\n if zerod:\n ret = ret.squeeze(0)\n if keepdims:\n if q.size > 1:\n keepdim = (-1,) + keepdim\n ret = ret.reshape(keepdim)\n\n return core._internal_ascontiguousarray(ret)\n", "path": "cupy/statistics/order.py"}]} | 3,980 | 273 |
gh_patches_debug_25641 | rasdani/github-patches | git_diff | sublimelsp__LSP-1573 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Rename…" code action is broken
(v1.1.6) The _Rename_ code action doesn’t work correctly. I wasn’t able to use it in VSCode so I can’t tell whether it comes from the language server or the Sublime extension.
Here is a minimal example:
```elm
module Test exposing (..)
import Html exposing (Html)
view : Html msg
view =
Html.text body
body : String
body =
"..."
```
When renaming `body` for instance:
* if the new name has the same length, it seems to work fine
* if the new name is longer (`bodyxyz` in the example below), the last few characters are duplicated:
```elm
view : Html msg
view =
Html.text bodyxyzxyz
bodyxyzxyz : String
bodyxyzxyz =
"..."
```
* if the new name is shorter (`a` in this example), well…
```elm
view : Html msg
view =
Html.text aaString
a "..."
```
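For what it's worth, the duplicated characters look exactly like the same offset-based edit being applied twice. A minimal sketch in plain Python (string slicing only, not the actual Sublime/LSP edit machinery; the offsets are made up to match the `Html.text body` line):
```python
def apply_edit(text, start, end, new_text):
    # Replace text[start:end] using fixed character offsets.
    return text[:start] + new_text + text[end:]

line = "    Html.text body"
edit = (14, 18, "bodyxyz")        # rename `body` -> `bodyxyz`

once = apply_edit(line, *edit)    # "    Html.text bodyxyz"    (expected)
twice = apply_edit(once, *edit)   # "    Html.text bodyxyzxyz" (the reported output)
print(once)
print(twice)
```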
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/edit.py`
Content:
```
1 from .logging import debug
2 from .open import open_file
3 from .promise import Promise
4 from .typing import List, Dict, Any, Iterable, Optional, Tuple
5 from .url import uri_to_filename
6 from functools import partial
7 import operator
8 import sublime
9
10
11 # tuple of start, end, newText, version
12 TextEdit = Tuple[Tuple[int, int], Tuple[int, int], str, Optional[int]]
13
14
15 def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:
16 changes = {} # type: Dict[str, List[TextEdit]]
17 raw_changes = workspace_edit.get('changes')
18 if isinstance(raw_changes, dict):
19 for uri, file_changes in raw_changes.items():
20 changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)
21 document_changes = workspace_edit.get('documentChanges')
22 if isinstance(document_changes, list):
23 for document_change in document_changes:
24 if 'kind' in document_change:
25 debug('Ignoring unsupported "resourceOperations" edit type')
26 continue
27 uri = document_change.get('textDocument').get('uri')
28 version = document_change.get('textDocument').get('version')
29 text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))
30 changes.setdefault(uri_to_filename(uri), []).extend(text_edit)
31 return changes
32
33
34 def parse_range(range: Dict[str, int]) -> Tuple[int, int]:
35 return range['line'], range['character']
36
37
38 def parse_text_edit(text_edit: Dict[str, Any], version: int = None) -> TextEdit:
39 return (
40 parse_range(text_edit['range']['start']),
41 parse_range(text_edit['range']['end']),
42 # Strip away carriage returns -- SublimeText takes care of that.
43 text_edit.get('newText', '').replace("\r", ""),
44 version
45 )
46
47
48 def sort_by_application_order(changes: Iterable[TextEdit]) -> List[TextEdit]:
49 # The spec reads:
50 # > However, it is possible that multiple edits have the same start position: multiple
51 # > inserts, or any number of inserts followed by a single remove or replace edit. If
52 # > multiple inserts have the same position, the order in the array defines the order in
53 # > which the inserted strings appear in the resulting text.
54 # So we sort by start position. But if multiple text edits start at the same position,
55 # we use the index in the array as the key.
56
57 return list(sorted(changes, key=operator.itemgetter(0)))
58
59
60 def apply_workspace_edit(window: sublime.Window, changes: Dict[str, List[TextEdit]]) -> Promise:
61 """Apply workspace edits. This function must be called from the main thread!"""
62 return Promise.all([open_file(window, fn).then(partial(_apply_edits, edits)) for fn, edits in changes.items()])
63
64
65 def _apply_edits(edits: List[TextEdit], view: Optional[sublime.View]) -> None:
66 if view and view.is_valid():
67 # Text commands run blocking. After this call has returned the changes are applied.
68 view.run_command("lsp_apply_document_edit", {"changes": edits})
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/core/edit.py b/plugin/core/edit.py
--- a/plugin/core/edit.py
+++ b/plugin/core/edit.py
@@ -14,10 +14,6 @@
def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:
changes = {} # type: Dict[str, List[TextEdit]]
- raw_changes = workspace_edit.get('changes')
- if isinstance(raw_changes, dict):
- for uri, file_changes in raw_changes.items():
- changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)
document_changes = workspace_edit.get('documentChanges')
if isinstance(document_changes, list):
for document_change in document_changes:
@@ -28,6 +24,11 @@
version = document_change.get('textDocument').get('version')
text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))
changes.setdefault(uri_to_filename(uri), []).extend(text_edit)
+ else:
+ raw_changes = workspace_edit.get('changes')
+ if isinstance(raw_changes, dict):
+ for uri, file_changes in raw_changes.items():
+ changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)
return changes
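To make the failure mode concrete, here is a hand-written example of a `WorkspaceEdit` payload that would have triggered the duplication under the old code; the URI, positions and version number are invented, but the key names are the ones `parse_workspace_edit` already reads:
```python
# One and the same edit carried in both the legacy `changes` map and the
# newer `documentChanges` list.
edit = {'range': {'start': {'line': 7, 'character': 14},
                  'end': {'line': 7, 'character': 18}},
        'newText': 'bodyxyz'}
workspace_edit = {
    'changes': {'file:///project/Test.elm': [edit]},
    'documentChanges': [{
        'textDocument': {'uri': 'file:///project/Test.elm', 'version': 3},
        'edits': [edit],
    }],
}
# Before the patch both keys were walked, so Test.elm ended up with two copies
# of the edit; with the patch, `changes` is only consulted when
# `documentChanges` is absent.
```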
| {"golden_diff": "diff --git a/plugin/core/edit.py b/plugin/core/edit.py\n--- a/plugin/core/edit.py\n+++ b/plugin/core/edit.py\n@@ -14,10 +14,6 @@\n \n def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:\n changes = {} # type: Dict[str, List[TextEdit]]\n- raw_changes = workspace_edit.get('changes')\n- if isinstance(raw_changes, dict):\n- for uri, file_changes in raw_changes.items():\n- changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n document_changes = workspace_edit.get('documentChanges')\n if isinstance(document_changes, list):\n for document_change in document_changes:\n@@ -28,6 +24,11 @@\n version = document_change.get('textDocument').get('version')\n text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))\n changes.setdefault(uri_to_filename(uri), []).extend(text_edit)\n+ else:\n+ raw_changes = workspace_edit.get('changes')\n+ if isinstance(raw_changes, dict):\n+ for uri, file_changes in raw_changes.items():\n+ changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n return changes\n", "issue": "\"Rename\u2026\" code action is broken\n(v1.1.6) The _Rename_ code action doesn\u2019t work correctly. I wasn\u2019t able to use it in VSCode so I can\u2019t tell whether it comes from the language server of the Sublime extension.\r\n\r\nHere is a minimal example:\r\n\r\n```elm\r\nmodule Test exposing (..)\r\n\r\nimport Html exposing (Html)\r\n\r\n\r\nview : Html msg\r\nview =\r\n Html.text body\r\n\r\n\r\nbody : String\r\nbody =\r\n \"...\"\r\n```\r\n\r\nWhen renaming `body` for instance:\r\n\r\n* if the new name has the same length, it seems to work fine\r\n* if the new name is longer (`bodyxyz` in the example below), the last few characters are duplicated:\r\n```elm\r\n\r\nview : Html msg\r\nview =\r\n Html.text bodyxyzxyz\r\n\r\n\r\nbodyxyzxyz : String\r\nbodyxyzxyz =\r\n \"...\"\r\n```\r\n* if the new name is shorter (`a` in this example), well\u2026\r\n```elm\r\nview : Html msg\r\nview =\r\n Html.text aaString\r\na \"...\"\r\n```\n", "before_files": [{"content": "from .logging import debug\nfrom .open import open_file\nfrom .promise import Promise\nfrom .typing import List, Dict, Any, Iterable, Optional, Tuple\nfrom .url import uri_to_filename\nfrom functools import partial\nimport operator\nimport sublime\n\n\n# tuple of start, end, newText, version\nTextEdit = Tuple[Tuple[int, int], Tuple[int, int], str, Optional[int]]\n\n\ndef parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:\n changes = {} # type: Dict[str, List[TextEdit]]\n raw_changes = workspace_edit.get('changes')\n if isinstance(raw_changes, dict):\n for uri, file_changes in raw_changes.items():\n changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n document_changes = workspace_edit.get('documentChanges')\n if isinstance(document_changes, list):\n for document_change in document_changes:\n if 'kind' in document_change:\n debug('Ignoring unsupported \"resourceOperations\" edit type')\n continue\n uri = document_change.get('textDocument').get('uri')\n version = document_change.get('textDocument').get('version')\n text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))\n changes.setdefault(uri_to_filename(uri), []).extend(text_edit)\n return changes\n\n\ndef parse_range(range: Dict[str, int]) -> Tuple[int, int]:\n return range['line'], range['character']\n\n\ndef 
parse_text_edit(text_edit: Dict[str, Any], version: int = None) -> TextEdit:\n return (\n parse_range(text_edit['range']['start']),\n parse_range(text_edit['range']['end']),\n # Strip away carriage returns -- SublimeText takes care of that.\n text_edit.get('newText', '').replace(\"\\r\", \"\"),\n version\n )\n\n\ndef sort_by_application_order(changes: Iterable[TextEdit]) -> List[TextEdit]:\n # The spec reads:\n # > However, it is possible that multiple edits have the same start position: multiple\n # > inserts, or any number of inserts followed by a single remove or replace edit. If\n # > multiple inserts have the same position, the order in the array defines the order in\n # > which the inserted strings appear in the resulting text.\n # So we sort by start position. But if multiple text edits start at the same position,\n # we use the index in the array as the key.\n\n return list(sorted(changes, key=operator.itemgetter(0)))\n\n\ndef apply_workspace_edit(window: sublime.Window, changes: Dict[str, List[TextEdit]]) -> Promise:\n \"\"\"Apply workspace edits. This function must be called from the main thread!\"\"\"\n return Promise.all([open_file(window, fn).then(partial(_apply_edits, edits)) for fn, edits in changes.items()])\n\n\ndef _apply_edits(edits: List[TextEdit], view: Optional[sublime.View]) -> None:\n if view and view.is_valid():\n # Text commands run blocking. After this call has returned the changes are applied.\n view.run_command(\"lsp_apply_document_edit\", {\"changes\": edits})\n", "path": "plugin/core/edit.py"}], "after_files": [{"content": "from .logging import debug\nfrom .open import open_file\nfrom .promise import Promise\nfrom .typing import List, Dict, Any, Iterable, Optional, Tuple\nfrom .url import uri_to_filename\nfrom functools import partial\nimport operator\nimport sublime\n\n\n# tuple of start, end, newText, version\nTextEdit = Tuple[Tuple[int, int], Tuple[int, int], str, Optional[int]]\n\n\ndef parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:\n changes = {} # type: Dict[str, List[TextEdit]]\n document_changes = workspace_edit.get('documentChanges')\n if isinstance(document_changes, list):\n for document_change in document_changes:\n if 'kind' in document_change:\n debug('Ignoring unsupported \"resourceOperations\" edit type')\n continue\n uri = document_change.get('textDocument').get('uri')\n version = document_change.get('textDocument').get('version')\n text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))\n changes.setdefault(uri_to_filename(uri), []).extend(text_edit)\n else:\n raw_changes = workspace_edit.get('changes')\n if isinstance(raw_changes, dict):\n for uri, file_changes in raw_changes.items():\n changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n return changes\n\n\ndef parse_range(range: Dict[str, int]) -> Tuple[int, int]:\n return range['line'], range['character']\n\n\ndef parse_text_edit(text_edit: Dict[str, Any], version: int = None) -> TextEdit:\n return (\n parse_range(text_edit['range']['start']),\n parse_range(text_edit['range']['end']),\n # Strip away carriage returns -- SublimeText takes care of that.\n text_edit.get('newText', '').replace(\"\\r\", \"\"),\n version\n )\n\n\ndef sort_by_application_order(changes: Iterable[TextEdit]) -> List[TextEdit]:\n # The spec reads:\n # > However, it is possible that multiple edits have the same start position: multiple\n # > inserts, or any number of inserts followed by a single remove or 
replace edit. If\n # > multiple inserts have the same position, the order in the array defines the order in\n # > which the inserted strings appear in the resulting text.\n # So we sort by start position. But if multiple text edits start at the same position,\n # we use the index in the array as the key.\n\n return list(sorted(changes, key=operator.itemgetter(0)))\n\n\ndef apply_workspace_edit(window: sublime.Window, changes: Dict[str, List[TextEdit]]) -> Promise:\n \"\"\"Apply workspace edits. This function must be called from the main thread!\"\"\"\n return Promise.all([open_file(window, fn).then(partial(_apply_edits, edits)) for fn, edits in changes.items()])\n\n\ndef _apply_edits(edits: List[TextEdit], view: Optional[sublime.View]) -> None:\n if view and view.is_valid():\n # Text commands run blocking. After this call has returned the changes are applied.\n view.run_command(\"lsp_apply_document_edit\", {\"changes\": edits})\n", "path": "plugin/core/edit.py"}]} | 1,283 | 282 |
gh_patches_debug_14435 | rasdani/github-patches | git_diff | fossasia__open-event-server-5247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistent data on Admin Statistics end points
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here; instead, ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-server
**Current behavior:**
<!-- Describe how the bug manifests. -->
The admin statistics end points return various stats about events, users etc.
Currently they are completely out of sync with the database.
For instance, the `admin/statistics/events` endpoint returns

Whereas the actual event count when generically querying for events is 92:

**Expected behavior:**
<!-- Describe what the behavior would be without the bug. -->
The counts should be consistent.
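A quick way to see the mismatch from a Flask shell, assuming the app context is set up and that `get_count` accepts a plain query the way the schema above uses it:
```python
from datetime import datetime
import pytz
from app.models.event import Event
from app.api.helpers.db import get_count

total = get_count(Event.query)
draft = get_count(Event.query.filter_by(state='draft'))
published = get_count(Event.query.filter_by(state='published'))
past = get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))

# draft/published also count events that have already ended, so the three
# buckets overlap and their sum can exceed the total -- which is the
# inconsistency visible in the screenshots above.
print(total, draft, published, past)
```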
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/schema/admin_statistics_schema/events.py`
Content:
```
1 from marshmallow_jsonapi.flask import Schema
2 from marshmallow_jsonapi import fields
3 from app.models.event import Event
4 from app.api.helpers.db import get_count
5 from app.api.helpers.utilities import dasherize
6 from datetime import datetime
7 import pytz
8
9
10 class AdminStatisticsEventSchema(Schema):
11 """
12 Api schema
13 """
14 class Meta:
15 """
16 Meta class
17 """
18 type_ = 'admin-statistics-event'
19 self_view = 'v1.admin_statistics_event_detail'
20 inflect = dasherize
21
22 id = fields.String()
23 draft = fields.Method("events_draft_count")
24 published = fields.Method("events_published_count")
25 past = fields.Method("events_past_count")
26
27 def events_draft_count(self, obj):
28 return get_count(Event.query.filter_by(state='draft'))
29
30 def events_published_count(self, obj):
31 return get_count(Event.query.filter_by(state='published'))
32
33 def events_past_count(self, obj):
34 return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/schema/admin_statistics_schema/events.py b/app/api/schema/admin_statistics_schema/events.py
--- a/app/api/schema/admin_statistics_schema/events.py
+++ b/app/api/schema/admin_statistics_schema/events.py
@@ -25,10 +25,12 @@
past = fields.Method("events_past_count")
def events_draft_count(self, obj):
- return get_count(Event.query.filter_by(state='draft'))
+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))
+ return get_count(events.filter_by(state='draft'))
def events_published_count(self, obj):
- return get_count(Event.query.filter_by(state='published'))
+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))
+ return get_count(events.filter_by(state='published'))
def events_past_count(self, obj):
return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))
| {"golden_diff": "diff --git a/app/api/schema/admin_statistics_schema/events.py b/app/api/schema/admin_statistics_schema/events.py\n--- a/app/api/schema/admin_statistics_schema/events.py\n+++ b/app/api/schema/admin_statistics_schema/events.py\n@@ -25,10 +25,12 @@\n past = fields.Method(\"events_past_count\")\n \n def events_draft_count(self, obj):\n- return get_count(Event.query.filter_by(state='draft'))\n+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))\n+ return get_count(events.filter_by(state='draft'))\n \n def events_published_count(self, obj):\n- return get_count(Event.query.filter_by(state='published'))\n+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))\n+ return get_count(events.filter_by(state='published'))\n \n def events_past_count(self, obj):\n return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))\n", "issue": "Inconsistent data on Admin Statistics end points\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\nThe admin statistics end points return various stats about events, users etc.\r\nCurrently they are completely out of sync with the database.\r\nFor instance, the `admin/statisitics/events` returns \r\n\r\n\r\nWhere as the actual event count when generically querying for events is 92:\r\n\r\n\r\n**Expected behavior:**\r\n<!-- Describe what the behavior would be without the bug. -->\r\nThe counts should be consistent.\n", "before_files": [{"content": "from marshmallow_jsonapi.flask import Schema\nfrom marshmallow_jsonapi import fields\nfrom app.models.event import Event\nfrom app.api.helpers.db import get_count\nfrom app.api.helpers.utilities import dasherize\nfrom datetime import datetime\nimport pytz\n\n\nclass AdminStatisticsEventSchema(Schema):\n \"\"\"\n Api schema\n \"\"\"\n class Meta:\n \"\"\"\n Meta class\n \"\"\"\n type_ = 'admin-statistics-event'\n self_view = 'v1.admin_statistics_event_detail'\n inflect = dasherize\n\n id = fields.String()\n draft = fields.Method(\"events_draft_count\")\n published = fields.Method(\"events_published_count\")\n past = fields.Method(\"events_past_count\")\n\n def events_draft_count(self, obj):\n return get_count(Event.query.filter_by(state='draft'))\n\n def events_published_count(self, obj):\n return get_count(Event.query.filter_by(state='published'))\n\n def events_past_count(self, obj):\n return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))\n", "path": "app/api/schema/admin_statistics_schema/events.py"}], "after_files": [{"content": "from marshmallow_jsonapi.flask import Schema\nfrom marshmallow_jsonapi import fields\nfrom app.models.event import Event\nfrom app.api.helpers.db import get_count\nfrom app.api.helpers.utilities import dasherize\nfrom datetime import datetime\nimport pytz\n\n\nclass AdminStatisticsEventSchema(Schema):\n \"\"\"\n Api schema\n \"\"\"\n class Meta:\n \"\"\"\n Meta class\n \"\"\"\n type_ = 'admin-statistics-event'\n self_view = 'v1.admin_statistics_event_detail'\n inflect = dasherize\n\n id = fields.String()\n draft = fields.Method(\"events_draft_count\")\n published = fields.Method(\"events_published_count\")\n past = fields.Method(\"events_past_count\")\n\n def events_draft_count(self, obj):\n events = Event.query.filter(Event.ends_at > 
datetime.now(pytz.utc))\n return get_count(events.filter_by(state='draft'))\n\n def events_published_count(self, obj):\n events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))\n return get_count(events.filter_by(state='published'))\n\n def events_past_count(self, obj):\n return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))\n", "path": "app/api/schema/admin_statistics_schema/events.py"}]} | 833 | 202 |
gh_patches_debug_28999 | rasdani/github-patches | git_diff | biopython__biopython-3662 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Order of coordinates of a multi-part feature that maps to the reverse strand may be 'wrong' in a GenBank file generated from a SnapGene file?
### Setup
I am reporting a problem with Biopython version, Python version, and operating
system as follows:
```
3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37)
[GCC 9.3.0]
CPython
Linux-5.4.72-microsoft-standard-WSL2-x86_64-with-debian-bullseye-sid
1.79
```
### Expected behaviour
Cloning programs/webapps such as SnapGene or Benchling should display all features included in a GenBank file that has been converted from a SnapGene file with BioPython.
### Actual behaviour
Cloning programs/webapps such as SnapGene or Benchling do not display all features included in a GenBank file that has been converted from a SnapGene file with BioPython. The problematic features are those that contain multiple elements on the reverse strand, for example `complement(join(3873..3941,3081..3872))`. If I reverse the order of those elements, that is, `complement(join(3081..3872,3873..3941))`, then everything's good, and the feature is shown in my fav cloning programs/webapp. I guess the elements of a feature that maps to the reverse/complement strand should nonetheless be listed in an 'increasing order'?
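A small illustration of where the ordering comes from, using the coordinates quoted above converted to Biopython's 0-based `FeatureLocation` objects (the printed representation is approximate): joining the segments in the order SnapGene stores them puts the downstream block first on the minus strand, while joining them the other way round gives the increasing order shown in the second location string.
```python
from Bio.SeqFeature import FeatureLocation

seg_a = FeatureLocation(3872, 3941, strand=-1)  # GenBank 3873..3941
seg_b = FeatureLocation(3080, 3872, strand=-1)  # GenBank 3081..3872

# Stored segment order: rendered as complement(join(3873..3941,3081..3872))
print(seg_a + seg_b)
# Reversed order: rendered as complement(join(3081..3872,3873..3941))
print(seg_b + seg_a)
```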
Although it's most likely a coincidence, I also noticed that for the feature mentioned above, BioPython fails to include one of the notes that SnapGene instead includes in the exported file (the one at the very bottom of the left panel)

### Steps to reproduce
I am attaching the original SnapGene map and those converted to GenBank with SnapGene and BioPython to this message.
[test_plasmid.zip](https://github.com/biopython/biopython/files/6765312/test_plasmid.zip)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Bio/SeqIO/SnapGeneIO.py`
Content:
```
1 # Copyright 2017-2019 Damien Goutte-Gattat. All rights reserved.
2 #
3 # This file is part of the Biopython distribution and governed by your
4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
5 # Please see the LICENSE file that should have been included as part of this
6 # package.
7 """Bio.SeqIO support for the SnapGene file format.
8
9 The SnapGene binary format is the native format used by the SnapGene program
10 from GSL Biotech LLC.
11 """
12 from datetime import datetime
13 from re import sub
14 from struct import unpack
15 from xml.dom.minidom import parseString
16
17 from Bio.Seq import Seq
18 from Bio.SeqFeature import FeatureLocation
19 from Bio.SeqFeature import SeqFeature
20 from Bio.SeqRecord import SeqRecord
21
22 from .Interfaces import SequenceIterator
23
24
25 def _iterate(handle):
26 """Iterate over the packets of a SnapGene file.
27
28 A SnapGene file is made of packets, each packet being a TLV-like
29 structure comprising:
30
31 - 1 single byte indicating the packet's type;
32 - 1 big-endian long integer (4 bytes) indicating the length of the
33 packet's data;
34 - the actual data.
35 """
36 while True:
37 packet_type = handle.read(1)
38 if len(packet_type) < 1: # No more packet
39 return
40 packet_type = unpack(">B", packet_type)[0]
41
42 length = handle.read(4)
43 if len(length) < 4:
44 raise ValueError("Unexpected end of packet")
45 length = unpack(">I", length)[0]
46
47 data = handle.read(length)
48 if len(data) < length:
49 raise ValueError("Unexpected end of packet")
50
51 yield (packet_type, length, data)
52
53
54 def _parse_dna_packet(length, data, record):
55 """Parse a DNA sequence packet.
56
57 A DNA sequence packet contains a single byte flag followed by the
58 sequence itself.
59 """
60 if record.seq:
61 raise ValueError("The file contains more than one DNA packet")
62
63 flags, sequence = unpack(">B%ds" % (length - 1), data)
64 record.seq = Seq(sequence.decode("ASCII"))
65 record.annotations["molecule_type"] = "DNA"
66 if flags & 0x01:
67 record.annotations["topology"] = "circular"
68 else:
69 record.annotations["topology"] = "linear"
70
71
72 def _parse_notes_packet(length, data, record):
73 """Parse a 'Notes' packet.
74
75 This type of packet contains some metadata about the sequence. They
76 are stored as a XML string with a 'Notes' root node.
77 """
78 xml = parseString(data.decode("UTF-8"))
79 type = _get_child_value(xml, "Type")
80 if type == "Synthetic":
81 record.annotations["data_file_division"] = "SYN"
82 else:
83 record.annotations["data_file_division"] = "UNC"
84
85 date = _get_child_value(xml, "LastModified")
86 if date:
87 record.annotations["date"] = datetime.strptime(date, "%Y.%m.%d")
88
89 acc = _get_child_value(xml, "AccessionNumber")
90 if acc:
91 record.id = acc
92
93 comment = _get_child_value(xml, "Comments")
94 if comment:
95 record.name = comment.split(" ", 1)[0]
96 record.description = comment
97 if not acc:
98 record.id = record.name
99
100
101 def _parse_cookie_packet(length, data, record):
102 """Parse a SnapGene cookie packet.
103
104 Every SnapGene file starts with a packet of this type. It acts as
105 a magic cookie identifying the file as a SnapGene file.
106 """
107 cookie, seq_type, exp_version, imp_version = unpack(">8sHHH", data)
108 if cookie.decode("ASCII") != "SnapGene":
109 raise ValueError("The file is not a valid SnapGene file")
110
111
112 def _parse_location(rangespec, strand, record):
113 start, end = [int(x) for x in rangespec.split("-")]
114 # Account for SnapGene's 1-based coordinates
115 start = start - 1
116 if start > end:
117 # Range wrapping the end of the sequence
118 l1 = FeatureLocation(start, len(record), strand=strand)
119 l2 = FeatureLocation(0, end, strand=strand)
120 location = l1 + l2
121 else:
122 location = FeatureLocation(start, end, strand=strand)
123 return location
124
125
126 def _parse_features_packet(length, data, record):
127 """Parse a sequence features packet.
128
129 This packet stores sequence features (except primer binding sites,
130 which are in a dedicated Primers packet). The data is a XML string
131 starting with a 'Features' root node.
132 """
133 xml = parseString(data.decode("UTF-8"))
134 for feature in xml.getElementsByTagName("Feature"):
135 quals = {}
136
137 type = _get_attribute_value(feature, "type", default="misc_feature")
138
139 strand = +1
140 directionality = int(
141 _get_attribute_value(feature, "directionality", default="1")
142 )
143 if directionality == 2:
144 strand = -1
145
146 location = None
147 for segment in feature.getElementsByTagName("Segment"):
148 rng = _get_attribute_value(segment, "range")
149 if not location:
150 location = _parse_location(rng, strand, record)
151 else:
152 location = location + _parse_location(rng, strand, record)
153 if not location:
154 raise ValueError("Missing feature location")
155
156 for qualifier in feature.getElementsByTagName("Q"):
157 qname = _get_attribute_value(
158 qualifier, "name", error="Missing qualifier name"
159 )
160 qvalues = []
161 for value in qualifier.getElementsByTagName("V"):
162 if value.hasAttribute("text"):
163 qvalues.append(_decode(value.attributes["text"].value))
164 elif value.hasAttribute("predef"):
165 qvalues.append(_decode(value.attributes["predef"].value))
166 elif value.hasAttribute("int"):
167 qvalues.append(int(value.attributes["int"].value))
168 quals[qname] = qvalues
169
170 name = _get_attribute_value(feature, "name")
171 if name:
172 if "label" not in quals:
173 # No explicit label attribute, use the SnapGene name
174 quals["label"] = [name]
175 elif name not in quals["label"]:
176 # The SnapGene name is different from the label,
177 # add a specific attribute to represent it
178 quals["name"] = [name]
179
180 feature = SeqFeature(location, type=type, qualifiers=quals)
181 record.features.append(feature)
182
183
184 def _parse_primers_packet(length, data, record):
185 """Parse a Primers packet.
186
187 A Primers packet is similar to a Features packet but specifically
188 stores primer binding features. The data is a XML string starting
189 with a 'Primers' root node.
190 """
191 xml = parseString(data.decode("UTF-8"))
192 for primer in xml.getElementsByTagName("Primer"):
193 quals = {}
194
195 name = _get_attribute_value(primer, "name")
196 if name:
197 quals["label"] = [name]
198
199 for site in primer.getElementsByTagName("BindingSite"):
200 rng = _get_attribute_value(
201 site, "location", error="Missing binding site location"
202 )
203 strand = int(_get_attribute_value(site, "boundStrand", default="0"))
204 if strand == 1:
205 strand = -1
206 else:
207 strand = +1
208
209 feature = SeqFeature(
210 _parse_location(rng, strand, record),
211 type="primer_bind",
212 qualifiers=quals,
213 )
214 record.features.append(feature)
215
216
217 _packet_handlers = {
218 0x00: _parse_dna_packet,
219 0x05: _parse_primers_packet,
220 0x06: _parse_notes_packet,
221 0x0A: _parse_features_packet,
222 }
223
224
225 # Helper functions to process the XML data in
226 # some of the segments
227
228
229 def _decode(text):
230 # Get rid of HTML tags in some values
231 return sub("<[^>]+>", "", text)
232
233
234 def _get_attribute_value(node, name, default=None, error=None):
235 if node.hasAttribute(name):
236 return _decode(node.attributes[name].value)
237 elif error:
238 raise ValueError(error)
239 else:
240 return default
241
242
243 def _get_child_value(node, name, default=None, error=None):
244 children = node.getElementsByTagName(name)
245 if (
246 children
247 and children[0].childNodes
248 and children[0].firstChild.nodeType == node.TEXT_NODE
249 ):
250 return _decode(children[0].firstChild.data)
251 elif error:
252 raise ValueError(error)
253 else:
254 return default
255
256
257 class SnapGeneIterator(SequenceIterator):
258 """Parser for SnapGene files."""
259
260 def __init__(self, source):
261 """Parse a SnapGene file and return a SeqRecord object.
262
263 Argument source is a file-like object or a path to a file.
264
265 Note that a SnapGene file can only contain one sequence, so this
266 iterator will always return a single record.
267 """
268 super().__init__(source, mode="b", fmt="SnapGene")
269
270 def parse(self, handle):
271 """Start parsing the file, and return a SeqRecord generator."""
272 records = self.iterate(handle)
273 return records
274
275 def iterate(self, handle):
276 """Iterate over the records in the SnapGene file."""
277 record = SeqRecord(None)
278 packets = _iterate(handle)
279 try:
280 packet_type, length, data = next(packets)
281 except StopIteration:
282 raise ValueError("Empty file.") from None
283
284 if packet_type != 0x09:
285 raise ValueError("The file does not start with a SnapGene cookie packet")
286 _parse_cookie_packet(length, data, record)
287
288 for (packet_type, length, data) in packets:
289 handler = _packet_handlers.get(packet_type)
290 if handler is not None:
291 handler(length, data, record)
292
293 if not record.seq:
294 raise ValueError("No DNA packet in file")
295
296 yield record
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Bio/SeqIO/SnapGeneIO.py b/Bio/SeqIO/SnapGeneIO.py
--- a/Bio/SeqIO/SnapGeneIO.py
+++ b/Bio/SeqIO/SnapGeneIO.py
@@ -144,12 +144,33 @@
strand = -1
location = None
+ subparts = []
+ n_parts = 0
for segment in feature.getElementsByTagName("Segment"):
+ if _get_attribute_value(segment, "type", "standard") == "gap":
+ continue
rng = _get_attribute_value(segment, "range")
+ n_parts += 1
+ next_location = _parse_location(rng, strand, record)
if not location:
- location = _parse_location(rng, strand, record)
+ location = next_location
+ elif strand == -1:
+ # Reverse segments order for reverse-strand features
+ location = next_location + location
else:
- location = location + _parse_location(rng, strand, record)
+ location = location + next_location
+
+ name = _get_attribute_value(segment, "name")
+ if name:
+ subparts.append([n_parts, name])
+
+ if len(subparts) > 0:
+ # Add a "parts" qualifiers to represent "named subfeatures"
+ if strand == -1:
+ # Reverse segment indexes and order for reverse-strand features
+ subparts = reversed([[n_parts - i + 1, name] for i, name in subparts])
+ quals["parts"] = [";".join("{}:{}".format(i, name) for i, name in subparts)]
+
if not location:
raise ValueError("Missing feature location")
@@ -221,7 +242,6 @@
0x0A: _parse_features_packet,
}
-
# Helper functions to process the XML data in
# some of the segments
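A possible way to check the behaviour once the patch is applied; the file name is an assumption (use whichever `.dna` map from the attached archive), everything else is standard `Bio.SeqIO` usage:
```python
from Bio import SeqIO

record = SeqIO.read("test_plasmid.dna", "snapgene")
for feature in record.features:
    loc = feature.location
    if loc.strand == -1 and len(loc.parts) > 1:
        # Multi-part minus-strand features: parts should now come out in
        # increasing coordinate order, plus the new "parts" qualifier.
        print(feature.type, loc, feature.qualifiers.get("parts"))

SeqIO.convert("test_plasmid.dna", "snapgene", "test_plasmid.gb", "genbank")
```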
| {"golden_diff": "diff --git a/Bio/SeqIO/SnapGeneIO.py b/Bio/SeqIO/SnapGeneIO.py\n--- a/Bio/SeqIO/SnapGeneIO.py\n+++ b/Bio/SeqIO/SnapGeneIO.py\n@@ -144,12 +144,33 @@\n strand = -1\n \n location = None\n+ subparts = []\n+ n_parts = 0\n for segment in feature.getElementsByTagName(\"Segment\"):\n+ if _get_attribute_value(segment, \"type\", \"standard\") == \"gap\":\n+ continue\n rng = _get_attribute_value(segment, \"range\")\n+ n_parts += 1\n+ next_location = _parse_location(rng, strand, record)\n if not location:\n- location = _parse_location(rng, strand, record)\n+ location = next_location\n+ elif strand == -1:\n+ # Reverse segments order for reverse-strand features\n+ location = next_location + location\n else:\n- location = location + _parse_location(rng, strand, record)\n+ location = location + next_location\n+\n+ name = _get_attribute_value(segment, \"name\")\n+ if name:\n+ subparts.append([n_parts, name])\n+\n+ if len(subparts) > 0:\n+ # Add a \"parts\" qualifiers to represent \"named subfeatures\"\n+ if strand == -1:\n+ # Reverse segment indexes and order for reverse-strand features\n+ subparts = reversed([[n_parts - i + 1, name] for i, name in subparts])\n+ quals[\"parts\"] = [\";\".join(\"{}:{}\".format(i, name) for i, name in subparts)]\n+\n if not location:\n raise ValueError(\"Missing feature location\")\n \n@@ -221,7 +242,6 @@\n 0x0A: _parse_features_packet,\n }\n \n-\n # Helper functions to process the XML data in\n # some of the segments\n", "issue": "Order of coordinates of a multi-part feature that maps to the reverse strand maybe 'wrong' in GenBank file generated from SnapGene file?\n### Setup\r\n\r\nI am reporting a problem with Biopython version, Python version, and operating\r\nsystem as follows:\r\n\r\n```\r\n3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) \r\n[GCC 9.3.0]\r\nCPython\r\nLinux-5.4.72-microsoft-standard-WSL2-x86_64-with-debian-bullseye-sid\r\n1.79\r\n```\r\n\r\n### Expected behaviour\r\nCloning programs/webapps such as SnapGene or Benchling should display all features included in a GenBank file that has been converted from a SnapGene file with BioPython.\r\n\r\n### Actual behaviour\r\nCloning programs/webapps such as SnapGene or Benchling do not display all features included in a GenBank file that has been converted from a SnapGene file with BioPython. The problematic features are those that contain multiple elements on the reverse strand, for example `complement(join(3873..3941,3081..3872))`. If I reverse the order of those elements, that is, `complement(join(3081..3872,3873..3941))`, then everything's good, and the feature is shown in my fav cloning programs/webapp. I guess the elements of a feature that maps to the reverse/complement strand should nonetheless be listed in an 'increasing order'? \r\n\r\nAlthough it's most likely a coincidence, I also noticed that for the feature mentioned above, BioPython fails to include one of the notes that SnapGene instead includes in the exported file (the one at the very bottom of the left panel)\r\n\r\n\r\n\r\n### Steps to reproduce\r\n\r\nI am attaching the original SnapGene map and those converted to GenBank with SnapGene and BioPython to this message.\r\n\r\n[test_plasmid.zip](https://github.com/biopython/biopython/files/6765312/test_plasmid.zip)\n", "before_files": [{"content": "# Copyright 2017-2019 Damien Goutte-Gattat. 
All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Bio.SeqIO support for the SnapGene file format.\n\nThe SnapGene binary format is the native format used by the SnapGene program\nfrom GSL Biotech LLC.\n\"\"\"\nfrom datetime import datetime\nfrom re import sub\nfrom struct import unpack\nfrom xml.dom.minidom import parseString\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqFeature import FeatureLocation\nfrom Bio.SeqFeature import SeqFeature\nfrom Bio.SeqRecord import SeqRecord\n\nfrom .Interfaces import SequenceIterator\n\n\ndef _iterate(handle):\n \"\"\"Iterate over the packets of a SnapGene file.\n\n A SnapGene file is made of packets, each packet being a TLV-like\n structure comprising:\n\n - 1 single byte indicating the packet's type;\n - 1 big-endian long integer (4 bytes) indicating the length of the\n packet's data;\n - the actual data.\n \"\"\"\n while True:\n packet_type = handle.read(1)\n if len(packet_type) < 1: # No more packet\n return\n packet_type = unpack(\">B\", packet_type)[0]\n\n length = handle.read(4)\n if len(length) < 4:\n raise ValueError(\"Unexpected end of packet\")\n length = unpack(\">I\", length)[0]\n\n data = handle.read(length)\n if len(data) < length:\n raise ValueError(\"Unexpected end of packet\")\n\n yield (packet_type, length, data)\n\n\ndef _parse_dna_packet(length, data, record):\n \"\"\"Parse a DNA sequence packet.\n\n A DNA sequence packet contains a single byte flag followed by the\n sequence itself.\n \"\"\"\n if record.seq:\n raise ValueError(\"The file contains more than one DNA packet\")\n\n flags, sequence = unpack(\">B%ds\" % (length - 1), data)\n record.seq = Seq(sequence.decode(\"ASCII\"))\n record.annotations[\"molecule_type\"] = \"DNA\"\n if flags & 0x01:\n record.annotations[\"topology\"] = \"circular\"\n else:\n record.annotations[\"topology\"] = \"linear\"\n\n\ndef _parse_notes_packet(length, data, record):\n \"\"\"Parse a 'Notes' packet.\n\n This type of packet contains some metadata about the sequence. They\n are stored as a XML string with a 'Notes' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n type = _get_child_value(xml, \"Type\")\n if type == \"Synthetic\":\n record.annotations[\"data_file_division\"] = \"SYN\"\n else:\n record.annotations[\"data_file_division\"] = \"UNC\"\n\n date = _get_child_value(xml, \"LastModified\")\n if date:\n record.annotations[\"date\"] = datetime.strptime(date, \"%Y.%m.%d\")\n\n acc = _get_child_value(xml, \"AccessionNumber\")\n if acc:\n record.id = acc\n\n comment = _get_child_value(xml, \"Comments\")\n if comment:\n record.name = comment.split(\" \", 1)[0]\n record.description = comment\n if not acc:\n record.id = record.name\n\n\ndef _parse_cookie_packet(length, data, record):\n \"\"\"Parse a SnapGene cookie packet.\n\n Every SnapGene file starts with a packet of this type. 
It acts as\n a magic cookie identifying the file as a SnapGene file.\n \"\"\"\n cookie, seq_type, exp_version, imp_version = unpack(\">8sHHH\", data)\n if cookie.decode(\"ASCII\") != \"SnapGene\":\n raise ValueError(\"The file is not a valid SnapGene file\")\n\n\ndef _parse_location(rangespec, strand, record):\n start, end = [int(x) for x in rangespec.split(\"-\")]\n # Account for SnapGene's 1-based coordinates\n start = start - 1\n if start > end:\n # Range wrapping the end of the sequence\n l1 = FeatureLocation(start, len(record), strand=strand)\n l2 = FeatureLocation(0, end, strand=strand)\n location = l1 + l2\n else:\n location = FeatureLocation(start, end, strand=strand)\n return location\n\n\ndef _parse_features_packet(length, data, record):\n \"\"\"Parse a sequence features packet.\n\n This packet stores sequence features (except primer binding sites,\n which are in a dedicated Primers packet). The data is a XML string\n starting with a 'Features' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for feature in xml.getElementsByTagName(\"Feature\"):\n quals = {}\n\n type = _get_attribute_value(feature, \"type\", default=\"misc_feature\")\n\n strand = +1\n directionality = int(\n _get_attribute_value(feature, \"directionality\", default=\"1\")\n )\n if directionality == 2:\n strand = -1\n\n location = None\n for segment in feature.getElementsByTagName(\"Segment\"):\n rng = _get_attribute_value(segment, \"range\")\n if not location:\n location = _parse_location(rng, strand, record)\n else:\n location = location + _parse_location(rng, strand, record)\n if not location:\n raise ValueError(\"Missing feature location\")\n\n for qualifier in feature.getElementsByTagName(\"Q\"):\n qname = _get_attribute_value(\n qualifier, \"name\", error=\"Missing qualifier name\"\n )\n qvalues = []\n for value in qualifier.getElementsByTagName(\"V\"):\n if value.hasAttribute(\"text\"):\n qvalues.append(_decode(value.attributes[\"text\"].value))\n elif value.hasAttribute(\"predef\"):\n qvalues.append(_decode(value.attributes[\"predef\"].value))\n elif value.hasAttribute(\"int\"):\n qvalues.append(int(value.attributes[\"int\"].value))\n quals[qname] = qvalues\n\n name = _get_attribute_value(feature, \"name\")\n if name:\n if \"label\" not in quals:\n # No explicit label attribute, use the SnapGene name\n quals[\"label\"] = [name]\n elif name not in quals[\"label\"]:\n # The SnapGene name is different from the label,\n # add a specific attribute to represent it\n quals[\"name\"] = [name]\n\n feature = SeqFeature(location, type=type, qualifiers=quals)\n record.features.append(feature)\n\n\ndef _parse_primers_packet(length, data, record):\n \"\"\"Parse a Primers packet.\n\n A Primers packet is similar to a Features packet but specifically\n stores primer binding features. 
The data is a XML string starting\n with a 'Primers' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for primer in xml.getElementsByTagName(\"Primer\"):\n quals = {}\n\n name = _get_attribute_value(primer, \"name\")\n if name:\n quals[\"label\"] = [name]\n\n for site in primer.getElementsByTagName(\"BindingSite\"):\n rng = _get_attribute_value(\n site, \"location\", error=\"Missing binding site location\"\n )\n strand = int(_get_attribute_value(site, \"boundStrand\", default=\"0\"))\n if strand == 1:\n strand = -1\n else:\n strand = +1\n\n feature = SeqFeature(\n _parse_location(rng, strand, record),\n type=\"primer_bind\",\n qualifiers=quals,\n )\n record.features.append(feature)\n\n\n_packet_handlers = {\n 0x00: _parse_dna_packet,\n 0x05: _parse_primers_packet,\n 0x06: _parse_notes_packet,\n 0x0A: _parse_features_packet,\n}\n\n\n# Helper functions to process the XML data in\n# some of the segments\n\n\ndef _decode(text):\n # Get rid of HTML tags in some values\n return sub(\"<[^>]+>\", \"\", text)\n\n\ndef _get_attribute_value(node, name, default=None, error=None):\n if node.hasAttribute(name):\n return _decode(node.attributes[name].value)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\ndef _get_child_value(node, name, default=None, error=None):\n children = node.getElementsByTagName(name)\n if (\n children\n and children[0].childNodes\n and children[0].firstChild.nodeType == node.TEXT_NODE\n ):\n return _decode(children[0].firstChild.data)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\nclass SnapGeneIterator(SequenceIterator):\n \"\"\"Parser for SnapGene files.\"\"\"\n\n def __init__(self, source):\n \"\"\"Parse a SnapGene file and return a SeqRecord object.\n\n Argument source is a file-like object or a path to a file.\n\n Note that a SnapGene file can only contain one sequence, so this\n iterator will always return a single record.\n \"\"\"\n super().__init__(source, mode=\"b\", fmt=\"SnapGene\")\n\n def parse(self, handle):\n \"\"\"Start parsing the file, and return a SeqRecord generator.\"\"\"\n records = self.iterate(handle)\n return records\n\n def iterate(self, handle):\n \"\"\"Iterate over the records in the SnapGene file.\"\"\"\n record = SeqRecord(None)\n packets = _iterate(handle)\n try:\n packet_type, length, data = next(packets)\n except StopIteration:\n raise ValueError(\"Empty file.\") from None\n\n if packet_type != 0x09:\n raise ValueError(\"The file does not start with a SnapGene cookie packet\")\n _parse_cookie_packet(length, data, record)\n\n for (packet_type, length, data) in packets:\n handler = _packet_handlers.get(packet_type)\n if handler is not None:\n handler(length, data, record)\n\n if not record.seq:\n raise ValueError(\"No DNA packet in file\")\n\n yield record\n", "path": "Bio/SeqIO/SnapGeneIO.py"}], "after_files": [{"content": "# Copyright 2017-2019 Damien Goutte-Gattat. 
All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Bio.SeqIO support for the SnapGene file format.\n\nThe SnapGene binary format is the native format used by the SnapGene program\nfrom GSL Biotech LLC.\n\"\"\"\nfrom datetime import datetime\nfrom re import sub\nfrom struct import unpack\nfrom xml.dom.minidom import parseString\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqFeature import FeatureLocation\nfrom Bio.SeqFeature import SeqFeature\nfrom Bio.SeqRecord import SeqRecord\n\nfrom .Interfaces import SequenceIterator\n\n\ndef _iterate(handle):\n \"\"\"Iterate over the packets of a SnapGene file.\n\n A SnapGene file is made of packets, each packet being a TLV-like\n structure comprising:\n\n - 1 single byte indicating the packet's type;\n - 1 big-endian long integer (4 bytes) indicating the length of the\n packet's data;\n - the actual data.\n \"\"\"\n while True:\n packet_type = handle.read(1)\n if len(packet_type) < 1: # No more packet\n return\n packet_type = unpack(\">B\", packet_type)[0]\n\n length = handle.read(4)\n if len(length) < 4:\n raise ValueError(\"Unexpected end of packet\")\n length = unpack(\">I\", length)[0]\n\n data = handle.read(length)\n if len(data) < length:\n raise ValueError(\"Unexpected end of packet\")\n\n yield (packet_type, length, data)\n\n\ndef _parse_dna_packet(length, data, record):\n \"\"\"Parse a DNA sequence packet.\n\n A DNA sequence packet contains a single byte flag followed by the\n sequence itself.\n \"\"\"\n if record.seq:\n raise ValueError(\"The file contains more than one DNA packet\")\n\n flags, sequence = unpack(\">B%ds\" % (length - 1), data)\n record.seq = Seq(sequence.decode(\"ASCII\"))\n record.annotations[\"molecule_type\"] = \"DNA\"\n if flags & 0x01:\n record.annotations[\"topology\"] = \"circular\"\n else:\n record.annotations[\"topology\"] = \"linear\"\n\n\ndef _parse_notes_packet(length, data, record):\n \"\"\"Parse a 'Notes' packet.\n\n This type of packet contains some metadata about the sequence. They\n are stored as a XML string with a 'Notes' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n type = _get_child_value(xml, \"Type\")\n if type == \"Synthetic\":\n record.annotations[\"data_file_division\"] = \"SYN\"\n else:\n record.annotations[\"data_file_division\"] = \"UNC\"\n\n date = _get_child_value(xml, \"LastModified\")\n if date:\n record.annotations[\"date\"] = datetime.strptime(date, \"%Y.%m.%d\")\n\n acc = _get_child_value(xml, \"AccessionNumber\")\n if acc:\n record.id = acc\n\n comment = _get_child_value(xml, \"Comments\")\n if comment:\n record.name = comment.split(\" \", 1)[0]\n record.description = comment\n if not acc:\n record.id = record.name\n\n\ndef _parse_cookie_packet(length, data, record):\n \"\"\"Parse a SnapGene cookie packet.\n\n Every SnapGene file starts with a packet of this type. 
It acts as\n a magic cookie identifying the file as a SnapGene file.\n \"\"\"\n cookie, seq_type, exp_version, imp_version = unpack(\">8sHHH\", data)\n if cookie.decode(\"ASCII\") != \"SnapGene\":\n raise ValueError(\"The file is not a valid SnapGene file\")\n\n\ndef _parse_location(rangespec, strand, record):\n start, end = [int(x) for x in rangespec.split(\"-\")]\n # Account for SnapGene's 1-based coordinates\n start = start - 1\n if start > end:\n # Range wrapping the end of the sequence\n l1 = FeatureLocation(start, len(record), strand=strand)\n l2 = FeatureLocation(0, end, strand=strand)\n location = l1 + l2\n else:\n location = FeatureLocation(start, end, strand=strand)\n return location\n\n\ndef _parse_features_packet(length, data, record):\n \"\"\"Parse a sequence features packet.\n\n This packet stores sequence features (except primer binding sites,\n which are in a dedicated Primers packet). The data is a XML string\n starting with a 'Features' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for feature in xml.getElementsByTagName(\"Feature\"):\n quals = {}\n\n type = _get_attribute_value(feature, \"type\", default=\"misc_feature\")\n\n strand = +1\n directionality = int(\n _get_attribute_value(feature, \"directionality\", default=\"1\")\n )\n if directionality == 2:\n strand = -1\n\n location = None\n subparts = []\n n_parts = 0\n for segment in feature.getElementsByTagName(\"Segment\"):\n if _get_attribute_value(segment, \"type\", \"standard\") == \"gap\":\n continue\n rng = _get_attribute_value(segment, \"range\")\n n_parts += 1\n next_location = _parse_location(rng, strand, record)\n if not location:\n location = next_location\n elif strand == -1:\n # Reverse segments order for reverse-strand features\n location = next_location + location\n else:\n location = location + next_location\n\n name = _get_attribute_value(segment, \"name\")\n if name:\n subparts.append([n_parts, name])\n\n if len(subparts) > 0:\n # Add a \"parts\" qualifiers to represent \"named subfeatures\"\n if strand == -1:\n # Reverse segment indexes and order for reverse-strand features\n subparts = reversed([[n_parts - i + 1, name] for i, name in subparts])\n quals[\"parts\"] = [\";\".join(\"{}:{}\".format(i, name) for i, name in subparts)]\n\n if not location:\n raise ValueError(\"Missing feature location\")\n\n for qualifier in feature.getElementsByTagName(\"Q\"):\n qname = _get_attribute_value(\n qualifier, \"name\", error=\"Missing qualifier name\"\n )\n qvalues = []\n for value in qualifier.getElementsByTagName(\"V\"):\n if value.hasAttribute(\"text\"):\n qvalues.append(_decode(value.attributes[\"text\"].value))\n elif value.hasAttribute(\"predef\"):\n qvalues.append(_decode(value.attributes[\"predef\"].value))\n elif value.hasAttribute(\"int\"):\n qvalues.append(int(value.attributes[\"int\"].value))\n quals[qname] = qvalues\n\n name = _get_attribute_value(feature, \"name\")\n if name:\n if \"label\" not in quals:\n # No explicit label attribute, use the SnapGene name\n quals[\"label\"] = [name]\n elif name not in quals[\"label\"]:\n # The SnapGene name is different from the label,\n # add a specific attribute to represent it\n quals[\"name\"] = [name]\n\n feature = SeqFeature(location, type=type, qualifiers=quals)\n record.features.append(feature)\n\n\ndef _parse_primers_packet(length, data, record):\n \"\"\"Parse a Primers packet.\n\n A Primers packet is similar to a Features packet but specifically\n stores primer binding features. 
The data is a XML string starting\n with a 'Primers' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for primer in xml.getElementsByTagName(\"Primer\"):\n quals = {}\n\n name = _get_attribute_value(primer, \"name\")\n if name:\n quals[\"label\"] = [name]\n\n for site in primer.getElementsByTagName(\"BindingSite\"):\n rng = _get_attribute_value(\n site, \"location\", error=\"Missing binding site location\"\n )\n strand = int(_get_attribute_value(site, \"boundStrand\", default=\"0\"))\n if strand == 1:\n strand = -1\n else:\n strand = +1\n\n feature = SeqFeature(\n _parse_location(rng, strand, record),\n type=\"primer_bind\",\n qualifiers=quals,\n )\n record.features.append(feature)\n\n\n_packet_handlers = {\n 0x00: _parse_dna_packet,\n 0x05: _parse_primers_packet,\n 0x06: _parse_notes_packet,\n 0x0A: _parse_features_packet,\n}\n\n# Helper functions to process the XML data in\n# some of the segments\n\n\ndef _decode(text):\n # Get rid of HTML tags in some values\n return sub(\"<[^>]+>\", \"\", text)\n\n\ndef _get_attribute_value(node, name, default=None, error=None):\n if node.hasAttribute(name):\n return _decode(node.attributes[name].value)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\ndef _get_child_value(node, name, default=None, error=None):\n children = node.getElementsByTagName(name)\n if (\n children\n and children[0].childNodes\n and children[0].firstChild.nodeType == node.TEXT_NODE\n ):\n return _decode(children[0].firstChild.data)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\nclass SnapGeneIterator(SequenceIterator):\n \"\"\"Parser for SnapGene files.\"\"\"\n\n def __init__(self, source):\n \"\"\"Parse a SnapGene file and return a SeqRecord object.\n\n Argument source is a file-like object or a path to a file.\n\n Note that a SnapGene file can only contain one sequence, so this\n iterator will always return a single record.\n \"\"\"\n super().__init__(source, mode=\"b\", fmt=\"SnapGene\")\n\n def parse(self, handle):\n \"\"\"Start parsing the file, and return a SeqRecord generator.\"\"\"\n records = self.iterate(handle)\n return records\n\n def iterate(self, handle):\n \"\"\"Iterate over the records in the SnapGene file.\"\"\"\n record = SeqRecord(None)\n packets = _iterate(handle)\n try:\n packet_type, length, data = next(packets)\n except StopIteration:\n raise ValueError(\"Empty file.\") from None\n\n if packet_type != 0x09:\n raise ValueError(\"The file does not start with a SnapGene cookie packet\")\n _parse_cookie_packet(length, data, record)\n\n for (packet_type, length, data) in packets:\n handler = _packet_handlers.get(packet_type)\n if handler is not None:\n handler(length, data, record)\n\n if not record.seq:\n raise ValueError(\"No DNA packet in file\")\n\n yield record\n", "path": "Bio/SeqIO/SnapGeneIO.py"}]} | 3,791 | 433 |
gh_patches_debug_19428 | rasdani/github-patches | git_diff | horovod__horovod-1904 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.8 incompatibility with nccl_built check
See: https://github.com/huge-success/sanic/issues/1774
```
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:110: in wrapper
retval = f(*args, **kwargs)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:151: in nccl_built
result = _check_extension_lambda(
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:90: in _check_extension_lambda
p.start()
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/process.py:121: in start
self._popen = self._Popen(self)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/context.py:224: in _Popen
return _default_context.get_context().Process._Popen(process_obj)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/context.py:283: in _Popen
return Popen(process_obj)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_spawn_posix.py:32: in __init__
super().__init__(process_obj)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_fork.py:19: in __init__
self._launch(process_obj)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_spawn_posix.py:47: in _launch
reduction.dump(process_obj, fp)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/reduction.py:60: in dump
ForkingPickler(file, protocol).dump(obj)
E AttributeError: Can't pickle local object '_check_extension_lambda.<locals>._target_fn'
```
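A minimal standalone sketch of this failure mode, independent of Horovod (illustrative only, not repository code): on Python 3.8 the default multiprocessing start method on macOS is `spawn`, which pickles the `Process` target, and a function defined inside another function has no importable qualified name and cannot be pickled. Requesting a `fork` context (POSIX only) sidesteps the pickling step because the child inherits the parent's memory.

```python
# Standalone sketch, not Horovod code: a locally defined process target works
# under the 'fork' start method but fails to pickle under 'spawn'.
import multiprocessing


def run_with(start_method):
    ctx = multiprocessing.get_context(start_method)
    queue = ctx.Queue()

    def _target_fn(q):  # local function: has no importable qualified name
        q.put('ok')

    proc = ctx.Process(target=_target_fn, args=(queue,))
    try:
        proc.start()  # 'spawn' pickles the Process object here
        proc.join()
        return queue.get_nowait()
    except AttributeError as exc:  # "Can't pickle local object ...", as in the traceback above
        return 'failed: %s' % exc


if __name__ == '__main__':
    print(run_with('fork'))   # 'ok' (POSIX only; child inherits the function)
    print(run_with('spawn'))  # 'failed: Can't pickle local object ...'
```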
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `horovod/common/util.py`
Content:
```
1 # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 # Modifications copyright (C) 2019 Uber Technologies, Inc.
3 # Modifications copyright Microsoft
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 # =============================================================================
17
18 from contextlib import contextmanager
19 from multiprocessing import Process, Queue
20 import os
21 import sysconfig
22
23 EXTENSIONS = ['tensorflow', 'torch', 'mxnet']
24
25
26 def get_ext_suffix():
27 """Determine library extension for various versions of Python."""
28 ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
29 if ext_suffix:
30 return ext_suffix
31
32 ext_suffix = sysconfig.get_config_var('SO')
33 if ext_suffix:
34 return ext_suffix
35
36 return '.so'
37
38
39 def get_extension_full_path(pkg_path, *args):
40 assert len(args) >= 1
41 dir_path = os.path.join(os.path.dirname(pkg_path), *args[:-1])
42 full_path = os.path.join(dir_path, args[-1] + get_ext_suffix())
43 return full_path
44
45
46 def check_extension(ext_name, ext_env_var, pkg_path, *args):
47 full_path = get_extension_full_path(pkg_path, *args)
48 if not os.path.exists(full_path):
49 raise ImportError(
50 'Extension %s has not been built. If this is not expected, reinstall '
51 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))
52
53
54 def _check_extension_lambda(ext_base_name, fn, fn_desc, verbose):
55 """
56 Tries to load the extension in a new process. If successful, puts fn(ext)
57 to the queue or False otherwise. Mutes all stdout/stderr.
58 """
59 def _target_fn(ext_base_name, fn, fn_desc, queue, verbose):
60 import importlib
61 import sys
62 import traceback
63
64 if verbose:
65 print('Checking whether extension {ext_base_name} was {fn_desc}.'.format(
66 ext_base_name=ext_base_name, fn_desc=fn_desc))
67 else:
68 # Suppress output
69 sys.stdout = open(os.devnull, 'w')
70 sys.stderr = open(os.devnull, 'w')
71
72 try:
73 ext = importlib.import_module('.' + ext_base_name, 'horovod')
74 result = fn(ext)
75 except:
76 traceback.print_exc()
77 result = None
78
79 if verbose:
80 print('Extension {ext_base_name} {flag} {fn_desc}.'.format(
81 ext_base_name=ext_base_name, flag=('was' if result else 'was NOT'),
82 fn_desc=fn_desc))
83
84 queue.put(result)
85
86 queue = Queue()
87 p = Process(target=_target_fn,
88 args=(ext_base_name, fn, fn_desc, queue, verbose))
89 p.daemon = True
90 p.start()
91 p.join()
92 return queue.get_nowait()
93
94
95 def extension_available(ext_base_name, verbose=False):
96 available_fn = lambda ext: ext is not None
97 return _check_extension_lambda(
98 ext_base_name, available_fn, 'built', verbose) or False
99
100
101 def _cache(f):
102 cache = dict()
103
104 def wrapper(*args, **kwargs):
105 key = (args, frozenset(kwargs.items()))
106
107 if key in cache:
108 return cache[key]
109 else:
110 retval = f(*args, **kwargs)
111 cache[key] = retval
112 return retval
113
114 return wrapper
115
116
117 @_cache
118 def gpu_available(ext_base_name, verbose=False):
119 available_fn = lambda ext: ext._check_has_gpu()
120 return _check_extension_lambda(
121 ext_base_name, available_fn, 'running with GPU', verbose) or False
122
123
124 @_cache
125 def mpi_built(verbose=False):
126 for ext_base_name in EXTENSIONS:
127 built_fn = lambda ext: ext.mpi_built()
128 result = _check_extension_lambda(
129 ext_base_name, built_fn, 'built with MPI', verbose)
130 if result is not None:
131 return result
132 return False
133
134
135 @_cache
136 def gloo_built(verbose=False):
137 for ext_base_name in EXTENSIONS:
138 built_fn = lambda ext: ext.gloo_built()
139 result = _check_extension_lambda(
140 ext_base_name, built_fn, 'built with Gloo', verbose)
141 if result is not None:
142 return result
143 raise RuntimeError('Failed to determine if Gloo support has been built. '
144 'Run again with --verbose for more details.')
145
146
147 @_cache
148 def nccl_built(verbose=False):
149 for ext_base_name in EXTENSIONS:
150 built_fn = lambda ext: ext.nccl_built()
151 result = _check_extension_lambda(
152 ext_base_name, built_fn, 'built with NCCL', verbose)
153 if result is not None:
154 return result
155 raise RuntimeError('Failed to determine if NCCL support has been built. '
156 'Run again with --verbose for more details.')
157
158
159 @_cache
160 def ddl_built(verbose=False):
161 for ext_base_name in EXTENSIONS:
162 built_fn = lambda ext: ext.ddl_built()
163 result = _check_extension_lambda(
164 ext_base_name, built_fn, 'built with DDL', verbose)
165 if result is not None:
166 return result
167 raise RuntimeError('Failed to determine if DDL support has been built. '
168 'Run again with --verbose for more details.')
169
170
171 @_cache
172 def ccl_built(verbose=False):
173 for ext_base_name in EXTENSIONS:
174 built_fn = lambda ext: ext.ccl_built()
175 result = _check_extension_lambda(
176 ext_base_name, built_fn, 'built with CCL', verbose)
177 if result is not None:
178 return result
179 raise RuntimeError('Failed to determine if CCL support has been built. '
180 'Run again with --verbose for more details.')
181
182
183 @contextmanager
184 def env(**kwargs):
185 # ignore args with None values
186 for k in list(kwargs.keys()):
187 if kwargs[k] is None:
188 del kwargs[k]
189
190 # backup environment
191 backup = {}
192 for k in kwargs.keys():
193 backup[k] = os.environ.get(k)
194
195 # set new values & yield
196 for k, v in kwargs.items():
197 os.environ[k] = v
198
199 try:
200 yield
201 finally:
202 # restore environment
203 for k in kwargs.keys():
204 if backup[k] is not None:
205 os.environ[k] = backup[k]
206 else:
207 del os.environ[k]
208
209
210 def get_average_backwards_compatibility_fun(reduce_ops):
211 """
212 Handle backwards compatibility between the old average and the new op parameters.
213 Old code using the average parameter (e.g. hvd.allreduce(tensor, average=False))
214 gets unchanged behavior, but mixing old and new is disallowed (e.g. no
215 hvd.allreduce(tensor, average=False, op=hvd.Adasum)).
216 """
217 def impl(op, average):
218 if op != None:
219 if average != None:
220 raise ValueError('The op parameter supersedes average. Please provide only one of them.')
221 return op
222 elif average != None:
223 return reduce_ops.Average if average else reduce_ops.Sum
224 else:
225 return reduce_ops.Average
226 return impl
227
228
229 def num_rank_is_power_2(num_rank):
230 """
231 Tests if the given number of ranks is of power of 2. This check is required
232 for Adasum allreduce.
233 TODO support non-power of 2 ranks.
234 """
235 return num_rank != 0 and ((num_rank & (num_rank -1)) == 0)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/horovod/common/util.py b/horovod/common/util.py
--- a/horovod/common/util.py
+++ b/horovod/common/util.py
@@ -15,11 +15,15 @@
# limitations under the License.
# =============================================================================
-from contextlib import contextmanager
-from multiprocessing import Process, Queue
+import multiprocessing
import os
+import sys
import sysconfig
+from contextlib import contextmanager
+
+
+_PY3 = sys.version_info[0] == 3
EXTENSIONS = ['tensorflow', 'torch', 'mxnet']
@@ -83,9 +87,11 @@
queue.put(result)
- queue = Queue()
- p = Process(target=_target_fn,
- args=(ext_base_name, fn, fn_desc, queue, verbose))
+ # 'fork' is required because horovodrun is a frozen executable
+ ctx = multiprocessing.get_context('fork') if _PY3 else multiprocessing
+ queue = ctx.Queue()
+ p = ctx.Process(target=_target_fn,
+ args=(ext_base_name, fn, fn_desc, queue, verbose))
p.daemon = True
p.start()
p.join()
| {"golden_diff": "diff --git a/horovod/common/util.py b/horovod/common/util.py\n--- a/horovod/common/util.py\n+++ b/horovod/common/util.py\n@@ -15,11 +15,15 @@\n # limitations under the License.\n # =============================================================================\n \n-from contextlib import contextmanager\n-from multiprocessing import Process, Queue\n+import multiprocessing\n import os\n+import sys\n import sysconfig\n \n+from contextlib import contextmanager\n+\n+\n+_PY3 = sys.version_info[0] == 3\n EXTENSIONS = ['tensorflow', 'torch', 'mxnet']\n \n \n@@ -83,9 +87,11 @@\n \n queue.put(result)\n \n- queue = Queue()\n- p = Process(target=_target_fn,\n- args=(ext_base_name, fn, fn_desc, queue, verbose))\n+ # 'fork' is required because horovodrun is a frozen executable\n+ ctx = multiprocessing.get_context('fork') if _PY3 else multiprocessing\n+ queue = ctx.Queue()\n+ p = ctx.Process(target=_target_fn,\n+ args=(ext_base_name, fn, fn_desc, queue, verbose))\n p.daemon = True\n p.start()\n p.join()\n", "issue": "Python 3.8 incompatibility with nccl_built check\nSee: https://github.com/huge-success/sanic/issues/1774\r\n\r\n```\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:110: in wrapper\r\n retval = f(*args, **kwargs)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:151: in nccl_built\r\n result = _check_extension_lambda(\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:90: in _check_extension_lambda\r\n p.start()\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/process.py:121: in start\r\n self._popen = self._Popen(self)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/context.py:224: in _Popen\r\n return _default_context.get_context().Process._Popen(process_obj)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/context.py:283: in _Popen\r\n return Popen(process_obj)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_spawn_posix.py:32: in __init__\r\n super().__init__(process_obj)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_fork.py:19: in __init__\r\n self._launch(process_obj)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_spawn_posix.py:47: in _launch\r\n reduction.dump(process_obj, fp)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/reduction.py:60: in dump\r\n ForkingPickler(file, protocol).dump(obj)\r\nE AttributeError: Can't pickle local object '_check_extension_lambda.<locals>._target_fn'\r\n```\n", "before_files": [{"content": "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n# Modifications copyright (C) 2019 Uber Technologies, Inc.\n# Modifications copyright Microsoft\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nfrom contextlib import contextmanager\nfrom multiprocessing import Process, Queue\nimport os\nimport sysconfig\n\nEXTENSIONS = ['tensorflow', 'torch', 'mxnet']\n\n\ndef get_ext_suffix():\n \"\"\"Determine library extension for various versions of Python.\"\"\"\n ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')\n if ext_suffix:\n return ext_suffix\n\n ext_suffix = sysconfig.get_config_var('SO')\n if ext_suffix:\n return ext_suffix\n\n return '.so'\n\n\ndef get_extension_full_path(pkg_path, *args):\n assert len(args) >= 1\n dir_path = os.path.join(os.path.dirname(pkg_path), *args[:-1])\n full_path = os.path.join(dir_path, args[-1] + get_ext_suffix())\n return full_path\n\n\ndef check_extension(ext_name, ext_env_var, pkg_path, *args):\n full_path = get_extension_full_path(pkg_path, *args)\n if not os.path.exists(full_path):\n raise ImportError(\n 'Extension %s has not been built. If this is not expected, reinstall '\n 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))\n\n\ndef _check_extension_lambda(ext_base_name, fn, fn_desc, verbose):\n \"\"\"\n Tries to load the extension in a new process. If successful, puts fn(ext)\n to the queue or False otherwise. Mutes all stdout/stderr.\n \"\"\"\n def _target_fn(ext_base_name, fn, fn_desc, queue, verbose):\n import importlib\n import sys\n import traceback\n\n if verbose:\n print('Checking whether extension {ext_base_name} was {fn_desc}.'.format(\n ext_base_name=ext_base_name, fn_desc=fn_desc))\n else:\n # Suppress output\n sys.stdout = open(os.devnull, 'w')\n sys.stderr = open(os.devnull, 'w')\n\n try:\n ext = importlib.import_module('.' 
+ ext_base_name, 'horovod')\n result = fn(ext)\n except:\n traceback.print_exc()\n result = None\n\n if verbose:\n print('Extension {ext_base_name} {flag} {fn_desc}.'.format(\n ext_base_name=ext_base_name, flag=('was' if result else 'was NOT'),\n fn_desc=fn_desc))\n\n queue.put(result)\n\n queue = Queue()\n p = Process(target=_target_fn,\n args=(ext_base_name, fn, fn_desc, queue, verbose))\n p.daemon = True\n p.start()\n p.join()\n return queue.get_nowait()\n\n\ndef extension_available(ext_base_name, verbose=False):\n available_fn = lambda ext: ext is not None\n return _check_extension_lambda(\n ext_base_name, available_fn, 'built', verbose) or False\n\n\ndef _cache(f):\n cache = dict()\n\n def wrapper(*args, **kwargs):\n key = (args, frozenset(kwargs.items()))\n\n if key in cache:\n return cache[key]\n else:\n retval = f(*args, **kwargs)\n cache[key] = retval\n return retval\n\n return wrapper\n\n\n@_cache\ndef gpu_available(ext_base_name, verbose=False):\n available_fn = lambda ext: ext._check_has_gpu()\n return _check_extension_lambda(\n ext_base_name, available_fn, 'running with GPU', verbose) or False\n\n\n@_cache\ndef mpi_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.mpi_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with MPI', verbose)\n if result is not None:\n return result\n return False\n\n\n@_cache\ndef gloo_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.gloo_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with Gloo', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if Gloo support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef nccl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.nccl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with NCCL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if NCCL support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef ddl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.ddl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with DDL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if DDL support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef ccl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.ccl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with CCL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if CCL support has been built. 
'\n 'Run again with --verbose for more details.')\n\n\n@contextmanager\ndef env(**kwargs):\n # ignore args with None values\n for k in list(kwargs.keys()):\n if kwargs[k] is None:\n del kwargs[k]\n\n # backup environment\n backup = {}\n for k in kwargs.keys():\n backup[k] = os.environ.get(k)\n\n # set new values & yield\n for k, v in kwargs.items():\n os.environ[k] = v\n\n try:\n yield\n finally:\n # restore environment\n for k in kwargs.keys():\n if backup[k] is not None:\n os.environ[k] = backup[k]\n else:\n del os.environ[k]\n\n\ndef get_average_backwards_compatibility_fun(reduce_ops):\n \"\"\"\n Handle backwards compatibility between the old average and the new op parameters.\n Old code using the average parameter (e.g. hvd.allreduce(tensor, average=False))\n gets unchanged behavior, but mixing old and new is disallowed (e.g. no\n hvd.allreduce(tensor, average=False, op=hvd.Adasum)).\n \"\"\"\n def impl(op, average):\n if op != None:\n if average != None:\n raise ValueError('The op parameter supersedes average. Please provide only one of them.')\n return op\n elif average != None:\n return reduce_ops.Average if average else reduce_ops.Sum\n else:\n return reduce_ops.Average\n return impl\n\n\ndef num_rank_is_power_2(num_rank):\n \"\"\"\n Tests if the given number of ranks is of power of 2. This check is required\n for Adasum allreduce.\n TODO support non-power of 2 ranks.\n \"\"\"\n return num_rank != 0 and ((num_rank & (num_rank -1)) == 0)\n", "path": "horovod/common/util.py"}], "after_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications copyright (C) 2019 Uber Technologies, Inc.\n# Modifications copyright Microsoft\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport multiprocessing\nimport os\nimport sys\nimport sysconfig\n\nfrom contextlib import contextmanager\n\n\n_PY3 = sys.version_info[0] == 3\nEXTENSIONS = ['tensorflow', 'torch', 'mxnet']\n\n\ndef get_ext_suffix():\n \"\"\"Determine library extension for various versions of Python.\"\"\"\n ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')\n if ext_suffix:\n return ext_suffix\n\n ext_suffix = sysconfig.get_config_var('SO')\n if ext_suffix:\n return ext_suffix\n\n return '.so'\n\n\ndef get_extension_full_path(pkg_path, *args):\n assert len(args) >= 1\n dir_path = os.path.join(os.path.dirname(pkg_path), *args[:-1])\n full_path = os.path.join(dir_path, args[-1] + get_ext_suffix())\n return full_path\n\n\ndef check_extension(ext_name, ext_env_var, pkg_path, *args):\n full_path = get_extension_full_path(pkg_path, *args)\n if not os.path.exists(full_path):\n raise ImportError(\n 'Extension %s has not been built. If this is not expected, reinstall '\n 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))\n\n\ndef _check_extension_lambda(ext_base_name, fn, fn_desc, verbose):\n \"\"\"\n Tries to load the extension in a new process. 
If successful, puts fn(ext)\n to the queue or False otherwise. Mutes all stdout/stderr.\n \"\"\"\n def _target_fn(ext_base_name, fn, fn_desc, queue, verbose):\n import importlib\n import sys\n import traceback\n\n if verbose:\n print('Checking whether extension {ext_base_name} was {fn_desc}.'.format(\n ext_base_name=ext_base_name, fn_desc=fn_desc))\n else:\n # Suppress output\n sys.stdout = open(os.devnull, 'w')\n sys.stderr = open(os.devnull, 'w')\n\n try:\n ext = importlib.import_module('.' + ext_base_name, 'horovod')\n result = fn(ext)\n except:\n traceback.print_exc()\n result = None\n\n if verbose:\n print('Extension {ext_base_name} {flag} {fn_desc}.'.format(\n ext_base_name=ext_base_name, flag=('was' if result else 'was NOT'),\n fn_desc=fn_desc))\n\n queue.put(result)\n\n # 'fork' is required because horovodrun is a frozen executable\n ctx = multiprocessing.get_context('fork') if _PY3 else multiprocessing\n queue = ctx.Queue()\n p = ctx.Process(target=_target_fn,\n args=(ext_base_name, fn, fn_desc, queue, verbose))\n p.daemon = True\n p.start()\n p.join()\n return queue.get_nowait()\n\n\ndef extension_available(ext_base_name, verbose=False):\n available_fn = lambda ext: ext is not None\n return _check_extension_lambda(\n ext_base_name, available_fn, 'built', verbose) or False\n\n\ndef _cache(f):\n cache = dict()\n\n def wrapper(*args, **kwargs):\n key = (args, frozenset(kwargs.items()))\n\n if key in cache:\n return cache[key]\n else:\n retval = f(*args, **kwargs)\n cache[key] = retval\n return retval\n\n return wrapper\n\n\n@_cache\ndef gpu_available(ext_base_name, verbose=False):\n available_fn = lambda ext: ext._check_has_gpu()\n return _check_extension_lambda(\n ext_base_name, available_fn, 'running with GPU', verbose) or False\n\n\n@_cache\ndef mpi_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.mpi_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with MPI', verbose)\n if result is not None:\n return result\n return False\n\n\n@_cache\ndef gloo_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.gloo_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with Gloo', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if Gloo support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef nccl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.nccl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with NCCL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if NCCL support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef ddl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.ddl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with DDL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if DDL support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef ccl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.ccl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with CCL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if CCL support has been built. 
'\n 'Run again with --verbose for more details.')\n\n\n@contextmanager\ndef env(**kwargs):\n # ignore args with None values\n for k in list(kwargs.keys()):\n if kwargs[k] is None:\n del kwargs[k]\n\n # backup environment\n backup = {}\n for k in kwargs.keys():\n backup[k] = os.environ.get(k)\n\n # set new values & yield\n for k, v in kwargs.items():\n os.environ[k] = v\n\n try:\n yield\n finally:\n # restore environment\n for k in kwargs.keys():\n if backup[k] is not None:\n os.environ[k] = backup[k]\n else:\n del os.environ[k]\n\n\ndef get_average_backwards_compatibility_fun(reduce_ops):\n \"\"\"\n Handle backwards compatibility between the old average and the new op parameters.\n Old code using the average parameter (e.g. hvd.allreduce(tensor, average=False))\n gets unchanged behavior, but mixing old and new is disallowed (e.g. no\n hvd.allreduce(tensor, average=False, op=hvd.Adasum)).\n \"\"\"\n def impl(op, average):\n if op != None:\n if average != None:\n raise ValueError('The op parameter supersedes average. Please provide only one of them.')\n return op\n elif average != None:\n return reduce_ops.Average if average else reduce_ops.Sum\n else:\n return reduce_ops.Average\n return impl\n\n\ndef num_rank_is_power_2(num_rank):\n \"\"\"\n Tests if the given number of ranks is of power of 2. This check is required\n for Adasum allreduce.\n TODO support non-power of 2 ranks.\n \"\"\"\n return num_rank != 0 and ((num_rank & (num_rank -1)) == 0)\n", "path": "horovod/common/util.py"}]} | 3,144 | 268 |
gh_patches_debug_2398 | rasdani/github-patches | git_diff | microsoft__MLOS-477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SMAC optimizer messes up mlos_bench logging
SMAC optimizer completely overrides our logging setup and installs its own formatter, output handler, and so on. As a result, as soon as the SMAC optimizer is initialized, mlos_bench stops writing to its log file, and all logging goes to stdout, in a different format and at a different log level (always INFO). We need to find a way to make SMAC use our logger instead of setting up its own from scratch.
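A generic sketch of one way to contain this kind of clobbering (illustrative only, not mlos_bench code): snapshot the root logger's handlers and level before initializing the third-party component and restore them afterwards. This only covers changes to the root logger; a library may also touch named loggers or propagation flags, so it is a containment measure rather than a full solution.

```python
# Illustrative only (not mlos_bench code): preserve the caller's root-logger
# configuration across a third-party call that installs its own handlers.
import logging
from contextlib import contextmanager


@contextmanager
def preserve_root_logging():
    root = logging.getLogger()
    saved_handlers = list(root.handlers)
    saved_level = root.level
    try:
        yield
    finally:
        root.handlers[:] = saved_handlers  # discard handlers added inside the block
        root.setLevel(saved_level)


# Hypothetical usage around whatever call re-configures logging:
# with preserve_root_logging():
#     optimizer = SmacOptimizer(parameter_space=space)
```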
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Contains the wrapper class for SMAC Bayesian optimizers.
7 See Also: <https://automl.github.io/SMAC3/main/index.html>
8 """
9
10 from pathlib import Path
11 from typing import Dict, List, Optional, TYPE_CHECKING
12 from tempfile import TemporaryDirectory
13
14 import ConfigSpace
15 import numpy.typing as npt
16 import pandas as pd
17
18 from mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer
19 from mlos_core.spaces.adapters.adapter import BaseSpaceAdapter
20
21
22 class SmacOptimizer(BaseBayesianOptimizer):
23 """Wrapper class for SMAC based Bayesian optimization.
24
25 Parameters
26 ----------
27 parameter_space : ConfigSpace.ConfigurationSpace
28 The parameter space to optimize.
29
30 space_adapter : BaseSpaceAdapter
31 The space adapter class to employ for parameter space transformations.
32
33 seed : Optional[int]
34 By default SMAC uses a known seed (0) to keep results reproducible.
35 However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.
36
37 run_name : Optional[str]
38 Name of this run. This is used to easily distinguish across different runs.
39 If set to `None` (default), SMAC will generate a hash from metadata.
40
41 output_directory : Optional[str]
42 The directory where SMAC output will saved. If set to `None` (default), a temporary dir will be used.
43
44 max_trials : int
45 Maximum number of trials (i.e., function evaluations) to be run. Defaults to 100.
46 Note that modifying this value directly affects the value of `n_random_init`, if latter is set to `None`.
47
48 n_random_init : Optional[int]
49 Number of points evaluated at start to bootstrap the optimizer. Defaults to 10.
50
51 n_random_probability: Optional[float]
52 Probability of choosing to evaluate a random configuration during optimization.
53 Defaults to `0.1`. Setting this to a higher value favors exploration over exploitation.
54 """
55
56 def __init__(self, *, # pylint: disable=too-many-locals
57 parameter_space: ConfigSpace.ConfigurationSpace,
58 space_adapter: Optional[BaseSpaceAdapter] = None,
59 seed: Optional[int] = 0,
60 run_name: Optional[str] = None,
61 output_directory: Optional[str] = None,
62 max_trials: int = 100,
63 n_random_init: Optional[int] = 10,
64 n_random_probability: Optional[float] = 0.1):
65
66 super().__init__(
67 parameter_space=parameter_space,
68 space_adapter=space_adapter,
69 )
70
71 # Declare at the top because we need it in __del__/cleanup()
72 self._temp_output_directory: Optional[TemporaryDirectory] = None
73
74 # pylint: disable=import-outside-toplevel
75 from smac import HyperparameterOptimizationFacade as Optimizer_Smac
76 from smac import Scenario
77 from smac.intensifier.abstract_intensifier import AbstractIntensifier
78 from smac.initial_design import LatinHypercubeInitialDesign
79 from smac.main.config_selector import ConfigSelector
80 from smac.random_design.probability_design import ProbabilityRandomDesign
81 from smac.runhistory import TrialInfo
82
83 # Store for TrialInfo instances returned by .ask()
84 self.trial_info_map: Dict[ConfigSpace.Configuration, TrialInfo] = {}
85
86 # The default when not specified is to use a known seed (0) to keep results reproducible.
87 # However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.
88 # https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario
89 seed = -1 if seed is None else seed
90
91 # Create temporary directory for SMAC output (if none provided)
92 if output_directory is None:
93 # pylint: disable=consider-using-with
94 try:
95 self._temp_output_directory = TemporaryDirectory(ignore_cleanup_errors=True) # Argument added in Python 3.10
96 except TypeError:
97 self._temp_output_directory = TemporaryDirectory()
98 output_directory = self._temp_output_directory.name
99
100 scenario: Scenario = Scenario(
101 self.optimizer_parameter_space,
102 name=run_name,
103 output_directory=Path(output_directory),
104 deterministic=True,
105 n_trials=max_trials,
106 seed=seed or -1, # if -1, SMAC will generate a random seed internally
107 n_workers=1, # Use a single thread for evaluating trials
108 )
109 intensifier: AbstractIntensifier = Optimizer_Smac.get_intensifier(scenario, max_config_calls=1)
110 config_selector: ConfigSelector = ConfigSelector(scenario, retrain_after=1)
111
112 initial_design: Optional[LatinHypercubeInitialDesign] = None
113 if n_random_init is not None:
114 initial_design = LatinHypercubeInitialDesign(scenario=scenario, n_configs=n_random_init)
115 random_design: Optional[ProbabilityRandomDesign] = None
116 if n_random_probability is not None:
117 random_design = ProbabilityRandomDesign(probability=n_random_probability)
118
119 self.base_optimizer = Optimizer_Smac(
120 scenario,
121 SmacOptimizer._dummy_target_func,
122 initial_design=initial_design,
123 intensifier=intensifier,
124 random_design=random_design,
125 config_selector=config_selector,
126 overwrite=True,
127 )
128
129 def __del__(self) -> None:
130 # Best-effort attempt to clean up, in case the user forgets to call .cleanup()
131 self.cleanup()
132
133 @staticmethod
134 def _dummy_target_func(config: ConfigSpace.Configuration, seed: int = 0) -> None:
135 """Dummy target function for SMAC optimizer.
136
137 Since we only use the ask-and-tell interface, this is never called.
138
139 Parameters
140 ----------
141 config : ConfigSpace.Configuration
142 Configuration to evaluate.
143
144 seed : int
145 Random seed to use for the target function. Not actually used.
146 """
147 # NOTE: Providing a target function when using the ask-and-tell interface is an imperfection of the API
148 # -- this planned to be fixed in some future release: https://github.com/automl/SMAC3/issues/946
149 raise RuntimeError('This function should never be called.')
150
151 def _register(self, configurations: pd.DataFrame, scores: pd.Series, context: Optional[pd.DataFrame] = None) -> None:
152 """Registers the given configurations and scores.
153
154 Parameters
155 ----------
156 configurations : pd.DataFrame
157 Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
158
159 scores : pd.Series
160 Scores from running the configurations. The index is the same as the index of the configurations.
161
162 context : pd.DataFrame
163 Not Yet Implemented.
164 """
165 from smac.runhistory import StatusType, TrialInfo, TrialValue # pylint: disable=import-outside-toplevel
166
167 if context is not None:
168 raise NotImplementedError()
169
170 # Register each trial (one-by-one)
171 for config, score in zip(self._to_configspace_configs(configurations), scores.tolist()):
172 # Retrieve previously generated TrialInfo (returned by .ask()) or create new TrialInfo instance
173 info: TrialInfo = self.trial_info_map.get(config, TrialInfo(config=config, seed=self.base_optimizer.scenario.seed))
174 value: TrialValue = TrialValue(cost=score, time=0.0, status=StatusType.SUCCESS)
175 self.base_optimizer.tell(info, value, save=False)
176
177 # Save optimizer once we register all configs
178 self.base_optimizer.optimizer.save()
179
180 def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:
181 """Suggests a new configuration.
182
183 Parameters
184 ----------
185 context : pd.DataFrame
186 Not Yet Implemented.
187
188 Returns
189 -------
190 configuration : pd.DataFrame
191 Pandas dataframe with a single row. Column names are the parameter names.
192 """
193 if TYPE_CHECKING:
194 from smac.runhistory import TrialInfo # pylint: disable=import-outside-toplevel
195
196 if context is not None:
197 raise NotImplementedError()
198
199 trial: TrialInfo = self.base_optimizer.ask()
200 self.trial_info_map[trial.config] = trial
201 return pd.DataFrame([trial.config], columns=list(self.optimizer_parameter_space.keys()))
202
203 def register_pending(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> None:
204 raise NotImplementedError()
205
206 def surrogate_predict(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:
207 from smac.utils.configspace import convert_configurations_to_array # pylint: disable=import-outside-toplevel
208
209 if context is not None:
210 raise NotImplementedError()
211 if self._space_adapter:
212 raise NotImplementedError()
213
214 # pylint: disable=protected-access
215 if len(self._observations) < self.base_optimizer._initial_design._n_configs:
216 raise RuntimeError('Surrogate model can make predictions *only* after all initial points have been evaluated')
217 if self.base_optimizer._config_selector._model is None:
218 raise RuntimeError('Surrogate model is not yet trained')
219
220 configs: npt.NDArray = convert_configurations_to_array(self._to_configspace_configs(configurations))
221 mean_predictions, _ = self.base_optimizer._config_selector._model.predict(configs)
222 return mean_predictions.reshape(-1,)
223
224 def acquisition_function(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:
225 if context is not None:
226 raise NotImplementedError()
227 if self._space_adapter:
228 raise NotImplementedError()
229
230 # pylint: disable=protected-access
231 if self.base_optimizer._config_selector._acquisition_function is None:
232 raise RuntimeError('Acquisition function is not yet initialized')
233
234 configs: list = self._to_configspace_configs(configurations)
235 return self.base_optimizer._config_selector._acquisition_function(configs).reshape(-1,)
236
237 def cleanup(self) -> None:
238 if self._temp_output_directory is not None:
239 self._temp_output_directory.cleanup()
240 self._temp_output_directory = None
241
242 def _to_configspace_configs(self, configurations: pd.DataFrame) -> List[ConfigSpace.Configuration]:
243 """Convert a dataframe of configurations to a list of ConfigSpace configurations.
244
245 Parameters
246 ----------
247 configurations : pd.DataFrame
248 Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
249
250 Returns
251 -------
252 configurations : list
253 List of ConfigSpace configurations.
254 """
255 return [
256 ConfigSpace.Configuration(self.optimizer_parameter_space, values=config.to_dict())
257 for (_, config) in configurations.iterrows()
258 ]
259
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py
--- a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py
+++ b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py
@@ -124,6 +124,7 @@
random_design=random_design,
config_selector=config_selector,
overwrite=True,
+ logging_level=False, # Use the existing logger
)
def __del__(self) -> None:
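A sketch of the patched construction in isolation, assuming SMAC3's 2.x facade API in which `logging_level=False` tells SMAC to skip its own logging setup; the scenario values and function names here are illustrative, not taken from the repository.

```python
# Sketch of the patched call (cf. the diff above); scenario values are illustrative.
from smac import HyperparameterOptimizationFacade as Optimizer_Smac
from smac import Scenario


def build_optimizer(config_space, target_func):
    scenario = Scenario(config_space, deterministic=True, n_trials=100)
    return Optimizer_Smac(
        scenario,
        target_func,
        overwrite=True,
        logging_level=False,  # keep the host application's logging setup
    )
```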
| {"golden_diff": "diff --git a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py\n--- a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py\n+++ b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py\n@@ -124,6 +124,7 @@\n random_design=random_design,\n config_selector=config_selector,\n overwrite=True,\n+ logging_level=False, # Use the existing logger\n )\n \n def __del__(self) -> None:\n", "issue": "SMAC optimizer messes up mlos_bench logging\nSMAC optimizer completely overrides our logging setup and installs its own formatter, output handler, and so on. As a result, as soon as SMAC optimizer is initialized, mlos_bench stops writing to its log file, and all logging goes to stdout, in different format, and at different log level (always INFO). We need to find a way to make SMAC use our logger instead of setting up its own from scratch\n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nContains the wrapper class for SMAC Bayesian optimizers.\nSee Also: <https://automl.github.io/SMAC3/main/index.html>\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, TYPE_CHECKING\nfrom tempfile import TemporaryDirectory\n\nimport ConfigSpace\nimport numpy.typing as npt\nimport pandas as pd\n\nfrom mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer\nfrom mlos_core.spaces.adapters.adapter import BaseSpaceAdapter\n\n\nclass SmacOptimizer(BaseBayesianOptimizer):\n \"\"\"Wrapper class for SMAC based Bayesian optimization.\n\n Parameters\n ----------\n parameter_space : ConfigSpace.ConfigurationSpace\n The parameter space to optimize.\n\n space_adapter : BaseSpaceAdapter\n The space adapter class to employ for parameter space transformations.\n\n seed : Optional[int]\n By default SMAC uses a known seed (0) to keep results reproducible.\n However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n\n run_name : Optional[str]\n Name of this run. This is used to easily distinguish across different runs.\n If set to `None` (default), SMAC will generate a hash from metadata.\n\n output_directory : Optional[str]\n The directory where SMAC output will saved. If set to `None` (default), a temporary dir will be used.\n\n max_trials : int\n Maximum number of trials (i.e., function evaluations) to be run. Defaults to 100.\n Note that modifying this value directly affects the value of `n_random_init`, if latter is set to `None`.\n\n n_random_init : Optional[int]\n Number of points evaluated at start to bootstrap the optimizer. Defaults to 10.\n\n n_random_probability: Optional[float]\n Probability of choosing to evaluate a random configuration during optimization.\n Defaults to `0.1`. 
Setting this to a higher value favors exploration over exploitation.\n \"\"\"\n\n def __init__(self, *, # pylint: disable=too-many-locals\n parameter_space: ConfigSpace.ConfigurationSpace,\n space_adapter: Optional[BaseSpaceAdapter] = None,\n seed: Optional[int] = 0,\n run_name: Optional[str] = None,\n output_directory: Optional[str] = None,\n max_trials: int = 100,\n n_random_init: Optional[int] = 10,\n n_random_probability: Optional[float] = 0.1):\n\n super().__init__(\n parameter_space=parameter_space,\n space_adapter=space_adapter,\n )\n\n # Declare at the top because we need it in __del__/cleanup()\n self._temp_output_directory: Optional[TemporaryDirectory] = None\n\n # pylint: disable=import-outside-toplevel\n from smac import HyperparameterOptimizationFacade as Optimizer_Smac\n from smac import Scenario\n from smac.intensifier.abstract_intensifier import AbstractIntensifier\n from smac.initial_design import LatinHypercubeInitialDesign\n from smac.main.config_selector import ConfigSelector\n from smac.random_design.probability_design import ProbabilityRandomDesign\n from smac.runhistory import TrialInfo\n\n # Store for TrialInfo instances returned by .ask()\n self.trial_info_map: Dict[ConfigSpace.Configuration, TrialInfo] = {}\n\n # The default when not specified is to use a known seed (0) to keep results reproducible.\n # However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n # https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario\n seed = -1 if seed is None else seed\n\n # Create temporary directory for SMAC output (if none provided)\n if output_directory is None:\n # pylint: disable=consider-using-with\n try:\n self._temp_output_directory = TemporaryDirectory(ignore_cleanup_errors=True) # Argument added in Python 3.10\n except TypeError:\n self._temp_output_directory = TemporaryDirectory()\n output_directory = self._temp_output_directory.name\n\n scenario: Scenario = Scenario(\n self.optimizer_parameter_space,\n name=run_name,\n output_directory=Path(output_directory),\n deterministic=True,\n n_trials=max_trials,\n seed=seed or -1, # if -1, SMAC will generate a random seed internally\n n_workers=1, # Use a single thread for evaluating trials\n )\n intensifier: AbstractIntensifier = Optimizer_Smac.get_intensifier(scenario, max_config_calls=1)\n config_selector: ConfigSelector = ConfigSelector(scenario, retrain_after=1)\n\n initial_design: Optional[LatinHypercubeInitialDesign] = None\n if n_random_init is not None:\n initial_design = LatinHypercubeInitialDesign(scenario=scenario, n_configs=n_random_init)\n random_design: Optional[ProbabilityRandomDesign] = None\n if n_random_probability is not None:\n random_design = ProbabilityRandomDesign(probability=n_random_probability)\n\n self.base_optimizer = Optimizer_Smac(\n scenario,\n SmacOptimizer._dummy_target_func,\n initial_design=initial_design,\n intensifier=intensifier,\n random_design=random_design,\n config_selector=config_selector,\n overwrite=True,\n )\n\n def __del__(self) -> None:\n # Best-effort attempt to clean up, in case the user forgets to call .cleanup()\n self.cleanup()\n\n @staticmethod\n def _dummy_target_func(config: ConfigSpace.Configuration, seed: int = 0) -> None:\n \"\"\"Dummy target function for SMAC optimizer.\n\n Since we only use the ask-and-tell interface, this is never called.\n\n Parameters\n ----------\n config : ConfigSpace.Configuration\n Configuration to evaluate.\n\n seed : int\n Random seed to use for the target function. 
Not actually used.\n \"\"\"\n # NOTE: Providing a target function when using the ask-and-tell interface is an imperfection of the API\n # -- this planned to be fixed in some future release: https://github.com/automl/SMAC3/issues/946\n raise RuntimeError('This function should never be called.')\n\n def _register(self, configurations: pd.DataFrame, scores: pd.Series, context: Optional[pd.DataFrame] = None) -> None:\n \"\"\"Registers the given configurations and scores.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n scores : pd.Series\n Scores from running the configurations. The index is the same as the index of the configurations.\n\n context : pd.DataFrame\n Not Yet Implemented.\n \"\"\"\n from smac.runhistory import StatusType, TrialInfo, TrialValue # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n # Register each trial (one-by-one)\n for config, score in zip(self._to_configspace_configs(configurations), scores.tolist()):\n # Retrieve previously generated TrialInfo (returned by .ask()) or create new TrialInfo instance\n info: TrialInfo = self.trial_info_map.get(config, TrialInfo(config=config, seed=self.base_optimizer.scenario.seed))\n value: TrialValue = TrialValue(cost=score, time=0.0, status=StatusType.SUCCESS)\n self.base_optimizer.tell(info, value, save=False)\n\n # Save optimizer once we register all configs\n self.base_optimizer.optimizer.save()\n\n def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:\n \"\"\"Suggests a new configuration.\n\n Parameters\n ----------\n context : pd.DataFrame\n Not Yet Implemented.\n\n Returns\n -------\n configuration : pd.DataFrame\n Pandas dataframe with a single row. 
Column names are the parameter names.\n \"\"\"\n if TYPE_CHECKING:\n from smac.runhistory import TrialInfo # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n trial: TrialInfo = self.base_optimizer.ask()\n self.trial_info_map[trial.config] = trial\n return pd.DataFrame([trial.config], columns=list(self.optimizer_parameter_space.keys()))\n\n def register_pending(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> None:\n raise NotImplementedError()\n\n def surrogate_predict(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n from smac.utils.configspace import convert_configurations_to_array # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if len(self._observations) < self.base_optimizer._initial_design._n_configs:\n raise RuntimeError('Surrogate model can make predictions *only* after all initial points have been evaluated')\n if self.base_optimizer._config_selector._model is None:\n raise RuntimeError('Surrogate model is not yet trained')\n\n configs: npt.NDArray = convert_configurations_to_array(self._to_configspace_configs(configurations))\n mean_predictions, _ = self.base_optimizer._config_selector._model.predict(configs)\n return mean_predictions.reshape(-1,)\n\n def acquisition_function(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if self.base_optimizer._config_selector._acquisition_function is None:\n raise RuntimeError('Acquisition function is not yet initialized')\n\n configs: list = self._to_configspace_configs(configurations)\n return self.base_optimizer._config_selector._acquisition_function(configs).reshape(-1,)\n\n def cleanup(self) -> None:\n if self._temp_output_directory is not None:\n self._temp_output_directory.cleanup()\n self._temp_output_directory = None\n\n def _to_configspace_configs(self, configurations: pd.DataFrame) -> List[ConfigSpace.Configuration]:\n \"\"\"Convert a dataframe of configurations to a list of ConfigSpace configurations.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. 
The columns are parameter names and the rows are the configurations.\n\n Returns\n -------\n configurations : list\n List of ConfigSpace configurations.\n \"\"\"\n return [\n ConfigSpace.Configuration(self.optimizer_parameter_space, values=config.to_dict())\n for (_, config) in configurations.iterrows()\n ]\n", "path": "mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py"}], "after_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nContains the wrapper class for SMAC Bayesian optimizers.\nSee Also: <https://automl.github.io/SMAC3/main/index.html>\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, TYPE_CHECKING\nfrom tempfile import TemporaryDirectory\n\nimport ConfigSpace\nimport numpy.typing as npt\nimport pandas as pd\n\nfrom mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer\nfrom mlos_core.spaces.adapters.adapter import BaseSpaceAdapter\n\n\nclass SmacOptimizer(BaseBayesianOptimizer):\n \"\"\"Wrapper class for SMAC based Bayesian optimization.\n\n Parameters\n ----------\n parameter_space : ConfigSpace.ConfigurationSpace\n The parameter space to optimize.\n\n space_adapter : BaseSpaceAdapter\n The space adapter class to employ for parameter space transformations.\n\n seed : Optional[int]\n By default SMAC uses a known seed (0) to keep results reproducible.\n However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n\n run_name : Optional[str]\n Name of this run. This is used to easily distinguish across different runs.\n If set to `None` (default), SMAC will generate a hash from metadata.\n\n output_directory : Optional[str]\n The directory where SMAC output will saved. If set to `None` (default), a temporary dir will be used.\n\n max_trials : int\n Maximum number of trials (i.e., function evaluations) to be run. Defaults to 100.\n Note that modifying this value directly affects the value of `n_random_init`, if latter is set to `None`.\n\n n_random_init : Optional[int]\n Number of points evaluated at start to bootstrap the optimizer. Defaults to 10.\n\n n_random_probability: Optional[float]\n Probability of choosing to evaluate a random configuration during optimization.\n Defaults to `0.1`. 
Setting this to a higher value favors exploration over exploitation.\n \"\"\"\n\n def __init__(self, *, # pylint: disable=too-many-locals\n parameter_space: ConfigSpace.ConfigurationSpace,\n space_adapter: Optional[BaseSpaceAdapter] = None,\n seed: Optional[int] = 0,\n run_name: Optional[str] = None,\n output_directory: Optional[str] = None,\n max_trials: int = 100,\n n_random_init: Optional[int] = 10,\n n_random_probability: Optional[float] = 0.1):\n\n super().__init__(\n parameter_space=parameter_space,\n space_adapter=space_adapter,\n )\n\n # Declare at the top because we need it in __del__/cleanup()\n self._temp_output_directory: Optional[TemporaryDirectory] = None\n\n # pylint: disable=import-outside-toplevel\n from smac import HyperparameterOptimizationFacade as Optimizer_Smac\n from smac import Scenario\n from smac.intensifier.abstract_intensifier import AbstractIntensifier\n from smac.initial_design import LatinHypercubeInitialDesign\n from smac.main.config_selector import ConfigSelector\n from smac.random_design.probability_design import ProbabilityRandomDesign\n from smac.runhistory import TrialInfo\n\n # Store for TrialInfo instances returned by .ask()\n self.trial_info_map: Dict[ConfigSpace.Configuration, TrialInfo] = {}\n\n # The default when not specified is to use a known seed (0) to keep results reproducible.\n # However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n # https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario\n seed = -1 if seed is None else seed\n\n # Create temporary directory for SMAC output (if none provided)\n if output_directory is None:\n # pylint: disable=consider-using-with\n try:\n self._temp_output_directory = TemporaryDirectory(ignore_cleanup_errors=True) # Argument added in Python 3.10\n except TypeError:\n self._temp_output_directory = TemporaryDirectory()\n output_directory = self._temp_output_directory.name\n\n scenario: Scenario = Scenario(\n self.optimizer_parameter_space,\n name=run_name,\n output_directory=Path(output_directory),\n deterministic=True,\n n_trials=max_trials,\n seed=seed or -1, # if -1, SMAC will generate a random seed internally\n n_workers=1, # Use a single thread for evaluating trials\n )\n intensifier: AbstractIntensifier = Optimizer_Smac.get_intensifier(scenario, max_config_calls=1)\n config_selector: ConfigSelector = ConfigSelector(scenario, retrain_after=1)\n\n initial_design: Optional[LatinHypercubeInitialDesign] = None\n if n_random_init is not None:\n initial_design = LatinHypercubeInitialDesign(scenario=scenario, n_configs=n_random_init)\n random_design: Optional[ProbabilityRandomDesign] = None\n if n_random_probability is not None:\n random_design = ProbabilityRandomDesign(probability=n_random_probability)\n\n self.base_optimizer = Optimizer_Smac(\n scenario,\n SmacOptimizer._dummy_target_func,\n initial_design=initial_design,\n intensifier=intensifier,\n random_design=random_design,\n config_selector=config_selector,\n overwrite=True,\n logging_level=False, # Use the existing logger\n )\n\n def __del__(self) -> None:\n # Best-effort attempt to clean up, in case the user forgets to call .cleanup()\n self.cleanup()\n\n @staticmethod\n def _dummy_target_func(config: ConfigSpace.Configuration, seed: int = 0) -> None:\n \"\"\"Dummy target function for SMAC optimizer.\n\n Since we only use the ask-and-tell interface, this is never called.\n\n Parameters\n ----------\n config : ConfigSpace.Configuration\n Configuration to evaluate.\n\n seed : int\n Random 
seed to use for the target function. Not actually used.\n \"\"\"\n # NOTE: Providing a target function when using the ask-and-tell interface is an imperfection of the API\n # -- this planned to be fixed in some future release: https://github.com/automl/SMAC3/issues/946\n raise RuntimeError('This function should never be called.')\n\n def _register(self, configurations: pd.DataFrame, scores: pd.Series, context: Optional[pd.DataFrame] = None) -> None:\n \"\"\"Registers the given configurations and scores.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n scores : pd.Series\n Scores from running the configurations. The index is the same as the index of the configurations.\n\n context : pd.DataFrame\n Not Yet Implemented.\n \"\"\"\n from smac.runhistory import StatusType, TrialInfo, TrialValue # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n # Register each trial (one-by-one)\n for config, score in zip(self._to_configspace_configs(configurations), scores.tolist()):\n # Retrieve previously generated TrialInfo (returned by .ask()) or create new TrialInfo instance\n info: TrialInfo = self.trial_info_map.get(config, TrialInfo(config=config, seed=self.base_optimizer.scenario.seed))\n value: TrialValue = TrialValue(cost=score, time=0.0, status=StatusType.SUCCESS)\n self.base_optimizer.tell(info, value, save=False)\n\n # Save optimizer once we register all configs\n self.base_optimizer.optimizer.save()\n\n def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:\n \"\"\"Suggests a new configuration.\n\n Parameters\n ----------\n context : pd.DataFrame\n Not Yet Implemented.\n\n Returns\n -------\n configuration : pd.DataFrame\n Pandas dataframe with a single row. 
Column names are the parameter names.\n \"\"\"\n if TYPE_CHECKING:\n from smac.runhistory import TrialInfo # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n trial: TrialInfo = self.base_optimizer.ask()\n self.trial_info_map[trial.config] = trial\n return pd.DataFrame([trial.config], columns=list(self.optimizer_parameter_space.keys()))\n\n def register_pending(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> None:\n raise NotImplementedError()\n\n def surrogate_predict(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n from smac.utils.configspace import convert_configurations_to_array # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if len(self._observations) < self.base_optimizer._initial_design._n_configs:\n raise RuntimeError('Surrogate model can make predictions *only* after all initial points have been evaluated')\n if self.base_optimizer._config_selector._model is None:\n raise RuntimeError('Surrogate model is not yet trained')\n\n configs: npt.NDArray = convert_configurations_to_array(self._to_configspace_configs(configurations))\n mean_predictions, _ = self.base_optimizer._config_selector._model.predict(configs)\n return mean_predictions.reshape(-1,)\n\n def acquisition_function(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if self.base_optimizer._config_selector._acquisition_function is None:\n raise RuntimeError('Acquisition function is not yet initialized')\n\n configs: list = self._to_configspace_configs(configurations)\n return self.base_optimizer._config_selector._acquisition_function(configs).reshape(-1,)\n\n def cleanup(self) -> None:\n if self._temp_output_directory is not None:\n self._temp_output_directory.cleanup()\n self._temp_output_directory = None\n\n def _to_configspace_configs(self, configurations: pd.DataFrame) -> List[ConfigSpace.Configuration]:\n \"\"\"Convert a dataframe of configurations to a list of ConfigSpace configurations.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n Returns\n -------\n configurations : list\n List of ConfigSpace configurations.\n \"\"\"\n return [\n ConfigSpace.Configuration(self.optimizer_parameter_space, values=config.to_dict())\n for (_, config) in configurations.iterrows()\n ]\n", "path": "mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py"}]} | 3,350 | 144 |
gh_patches_debug_4620 | rasdani/github-patches | git_diff | getmoto__moto-1969 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Listing CloudFormation stacks should include deleted stacks
Querying the AWS API directly returns stacks with a status of `DELETE_COMPLETE`; with moto, those stacks are not returned.
The existing unit tests actually test for this incorrect behaviour:
https://github.com/spulec/moto/blob/4a275cc/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py#L516-L518
The tests should be updated and the `list_stacks` method on the backend should include the `self.deleted_stacks` data (https://github.com/spulec/moto/blob/4a275cc/moto/cloudformation/models.py#L225-L226)
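For illustration, a minimal regression-test sketch of the expected behaviour (the stack name, queue resource, and test name are placeholders of mine, not taken from the moto test suite):
```
import json

import boto3
from moto import mock_cloudformation

DUMMY_TEMPLATE = json.dumps({
    "Resources": {
        "Queue": {"Type": "AWS::SQS::Queue", "Properties": {"QueueName": "example-queue"}},
    }
})


@mock_cloudformation
def test_list_stacks_includes_deleted_stacks():
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(StackName="example-stack", TemplateBody=DUMMY_TEMPLATE)
    cf.delete_stack(StackName="example-stack")

    statuses = [s["StackStatus"] for s in cf.list_stacks()["StackSummaries"]]
    # Real AWS keeps deleted stacks visible under this status for ~90 days.
    assert "DELETE_COMPLETE" in statuses
```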
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/cloudformation/models.py`
Content:
```
1 from __future__ import unicode_literals
2 from datetime import datetime
3 import json
4 import yaml
5 import uuid
6
7 import boto.cloudformation
8 from moto.compat import OrderedDict
9 from moto.core import BaseBackend, BaseModel
10
11 from .parsing import ResourceMap, OutputMap
12 from .utils import (
13 generate_changeset_id,
14 generate_stack_id,
15 yaml_tag_constructor,
16 )
17 from .exceptions import ValidationError
18
19
20 class FakeStack(BaseModel):
21
22 def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):
23 self.stack_id = stack_id
24 self.name = name
25 self.template = template
26 self._parse_template()
27 self.parameters = parameters
28 self.region_name = region_name
29 self.notification_arns = notification_arns if notification_arns else []
30 self.role_arn = role_arn
31 self.tags = tags if tags else {}
32 self.events = []
33 if create_change_set:
34 self._add_stack_event("REVIEW_IN_PROGRESS",
35 resource_status_reason="User Initiated")
36 else:
37 self._add_stack_event("CREATE_IN_PROGRESS",
38 resource_status_reason="User Initiated")
39
40 self.description = self.template_dict.get('Description')
41 self.cross_stack_resources = cross_stack_resources or {}
42 self.resource_map = self._create_resource_map()
43 self.output_map = self._create_output_map()
44 self._add_stack_event("CREATE_COMPLETE")
45 self.status = 'CREATE_COMPLETE'
46
47 def _create_resource_map(self):
48 resource_map = ResourceMap(
49 self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict, self.cross_stack_resources)
50 resource_map.create()
51 return resource_map
52
53 def _create_output_map(self):
54 output_map = OutputMap(self.resource_map, self.template_dict, self.stack_id)
55 output_map.create()
56 return output_map
57
58 def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):
59 self.events.append(FakeEvent(
60 stack_id=self.stack_id,
61 stack_name=self.name,
62 logical_resource_id=self.name,
63 physical_resource_id=self.stack_id,
64 resource_type="AWS::CloudFormation::Stack",
65 resource_status=resource_status,
66 resource_status_reason=resource_status_reason,
67 resource_properties=resource_properties,
68 ))
69
70 def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):
71 # not used yet... feel free to help yourself
72 resource = self.resource_map[logical_resource_id]
73 self.events.append(FakeEvent(
74 stack_id=self.stack_id,
75 stack_name=self.name,
76 logical_resource_id=logical_resource_id,
77 physical_resource_id=resource.physical_resource_id,
78 resource_type=resource.type,
79 resource_status=resource_status,
80 resource_status_reason=resource_status_reason,
81 resource_properties=resource_properties,
82 ))
83
84 def _parse_template(self):
85 yaml.add_multi_constructor('', yaml_tag_constructor)
86 try:
87 self.template_dict = yaml.load(self.template)
88 except yaml.parser.ParserError:
89 self.template_dict = json.loads(self.template)
90
91 @property
92 def stack_parameters(self):
93 return self.resource_map.resolved_parameters
94
95 @property
96 def stack_resources(self):
97 return self.resource_map.values()
98
99 @property
100 def stack_outputs(self):
101 return self.output_map.values()
102
103 @property
104 def exports(self):
105 return self.output_map.exports
106
107 def update(self, template, role_arn=None, parameters=None, tags=None):
108 self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated")
109 self.template = template
110 self._parse_template()
111 self.resource_map.update(self.template_dict, parameters)
112 self.output_map = self._create_output_map()
113 self._add_stack_event("UPDATE_COMPLETE")
114 self.status = "UPDATE_COMPLETE"
115 self.role_arn = role_arn
116 # only overwrite tags if passed
117 if tags is not None:
118 self.tags = tags
119 # TODO: update tags in the resource map
120
121 def delete(self):
122 self._add_stack_event("DELETE_IN_PROGRESS",
123 resource_status_reason="User Initiated")
124 self.resource_map.delete()
125 self._add_stack_event("DELETE_COMPLETE")
126 self.status = "DELETE_COMPLETE"
127
128
129 class FakeEvent(BaseModel):
130
131 def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):
132 self.stack_id = stack_id
133 self.stack_name = stack_name
134 self.logical_resource_id = logical_resource_id
135 self.physical_resource_id = physical_resource_id
136 self.resource_type = resource_type
137 self.resource_status = resource_status
138 self.resource_status_reason = resource_status_reason
139 self.resource_properties = resource_properties
140 self.timestamp = datetime.utcnow()
141 self.event_id = uuid.uuid4()
142
143
144 class CloudFormationBackend(BaseBackend):
145
146 def __init__(self):
147 self.stacks = OrderedDict()
148 self.deleted_stacks = {}
149 self.exports = OrderedDict()
150 self.change_sets = OrderedDict()
151
152 def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):
153 stack_id = generate_stack_id(name)
154 new_stack = FakeStack(
155 stack_id=stack_id,
156 name=name,
157 template=template,
158 parameters=parameters,
159 region_name=region_name,
160 notification_arns=notification_arns,
161 tags=tags,
162 role_arn=role_arn,
163 cross_stack_resources=self.exports,
164 create_change_set=create_change_set,
165 )
166 self.stacks[stack_id] = new_stack
167 self._validate_export_uniqueness(new_stack)
168 for export in new_stack.exports:
169 self.exports[export.name] = export
170 return new_stack
171
172 def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):
173 if change_set_type == 'UPDATE':
174 stacks = self.stacks.values()
175 stack = None
176 for s in stacks:
177 if s.name == stack_name:
178 stack = s
179 if stack is None:
180 raise ValidationError(stack_name)
181
182 else:
183 stack = self.create_stack(stack_name, template, parameters,
184 region_name, notification_arns, tags,
185 role_arn, create_change_set=True)
186 change_set_id = generate_changeset_id(change_set_name, region_name)
187 self.stacks[change_set_name] = {'Id': change_set_id,
188 'StackId': stack.stack_id}
189 self.change_sets[change_set_id] = stack
190 return change_set_id, stack.stack_id
191
192 def execute_change_set(self, change_set_name, stack_name=None):
193 stack = None
194 if change_set_name in self.change_sets:
195 # This means arn was passed in
196 stack = self.change_sets[change_set_name]
197 else:
198 for cs in self.change_sets:
199 if self.change_sets[cs].name == change_set_name:
200 stack = self.change_sets[cs]
201 if stack is None:
202 raise ValidationError(stack_name)
203 if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':
204 stack._add_stack_event('CREATE_COMPLETE')
205 else:
206 stack._add_stack_event('UPDATE_IN_PROGRESS')
207 stack._add_stack_event('UPDATE_COMPLETE')
208 return True
209
210 def describe_stacks(self, name_or_stack_id):
211 stacks = self.stacks.values()
212 if name_or_stack_id:
213 for stack in stacks:
214 if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
215 return [stack]
216 if self.deleted_stacks:
217 deleted_stacks = self.deleted_stacks.values()
218 for stack in deleted_stacks:
219 if stack.stack_id == name_or_stack_id:
220 return [stack]
221 raise ValidationError(name_or_stack_id)
222 else:
223 return list(stacks)
224
225 def list_stacks(self):
226 return self.stacks.values()
227
228 def get_stack(self, name_or_stack_id):
229 all_stacks = dict(self.deleted_stacks, **self.stacks)
230 if name_or_stack_id in all_stacks:
231 # Lookup by stack id - deleted stacks incldued
232 return all_stacks[name_or_stack_id]
233 else:
234 # Lookup by stack name - undeleted stacks only
235 for stack in self.stacks.values():
236 if stack.name == name_or_stack_id:
237 return stack
238
239 def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):
240 stack = self.get_stack(name)
241 stack.update(template, role_arn, parameters=parameters, tags=tags)
242 return stack
243
244 def list_stack_resources(self, stack_name_or_id):
245 stack = self.get_stack(stack_name_or_id)
246 return stack.stack_resources
247
248 def delete_stack(self, name_or_stack_id):
249 if name_or_stack_id in self.stacks:
250 # Delete by stack id
251 stack = self.stacks.pop(name_or_stack_id, None)
252 stack.delete()
253 self.deleted_stacks[stack.stack_id] = stack
254 [self.exports.pop(export.name) for export in stack.exports]
255 return self.stacks.pop(name_or_stack_id, None)
256 else:
257 # Delete by stack name
258 for stack in list(self.stacks.values()):
259 if stack.name == name_or_stack_id:
260 self.delete_stack(stack.stack_id)
261
262 def list_exports(self, token):
263 all_exports = list(self.exports.values())
264 if token is None:
265 exports = all_exports[0:100]
266 next_token = '100' if len(all_exports) > 100 else None
267 else:
268 token = int(token)
269 exports = all_exports[token:token + 100]
270 next_token = str(token + 100) if len(all_exports) > token + 100 else None
271 return exports, next_token
272
273 def _validate_export_uniqueness(self, stack):
274 new_stack_export_names = [x.name for x in stack.exports]
275 export_names = self.exports.keys()
276 if not set(export_names).isdisjoint(new_stack_export_names):
277 raise ValidationError(stack.stack_id, message='Export names must be unique across a given region')
278
279
280 cloudformation_backends = {}
281 for region in boto.cloudformation.regions():
282 cloudformation_backends[region.name] = CloudFormationBackend()
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py
--- a/moto/cloudformation/models.py
+++ b/moto/cloudformation/models.py
@@ -223,7 +223,11 @@
return list(stacks)
def list_stacks(self):
- return self.stacks.values()
+ return [
+ v for v in self.stacks.values()
+ ] + [
+ v for v in self.deleted_stacks.values()
+ ]
def get_stack(self, name_or_stack_id):
all_stacks = dict(self.deleted_stacks, **self.stacks)
| {"golden_diff": "diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py\n--- a/moto/cloudformation/models.py\n+++ b/moto/cloudformation/models.py\n@@ -223,7 +223,11 @@\n return list(stacks)\n \n def list_stacks(self):\n- return self.stacks.values()\n+ return [\n+ v for v in self.stacks.values()\n+ ] + [\n+ v for v in self.deleted_stacks.values()\n+ ]\n \n def get_stack(self, name_or_stack_id):\n all_stacks = dict(self.deleted_stacks, **self.stacks)\n", "issue": "Listing CloudFormation stacks should include deleted stacks\nUsing the AWS API directly returns stacks with a status `DELETE_COMPLETE`; using moto those stacks are not returned.\r\n\r\nThe existing unit tests actually test for this incorrect behaviour:\r\nhttps://github.com/spulec/moto/blob/4a275cc/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py#L516-L518\r\n\r\nThe tests should be updated and the `list_stacks` method on the backend should include the `self.deleted_stacks` data (https://github.com/spulec/moto/blob/4a275cc/moto/cloudformation/models.py#L225-L226)\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom datetime import datetime\nimport json\nimport yaml\nimport uuid\n\nimport boto.cloudformation\nfrom moto.compat import OrderedDict\nfrom moto.core import BaseBackend, BaseModel\n\nfrom .parsing import ResourceMap, OutputMap\nfrom .utils import (\n generate_changeset_id,\n generate_stack_id,\n yaml_tag_constructor,\n)\nfrom .exceptions import ValidationError\n\n\nclass FakeStack(BaseModel):\n\n def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):\n self.stack_id = stack_id\n self.name = name\n self.template = template\n self._parse_template()\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n self.role_arn = role_arn\n self.tags = tags if tags else {}\n self.events = []\n if create_change_set:\n self._add_stack_event(\"REVIEW_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n else:\n self._add_stack_event(\"CREATE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n\n self.description = self.template_dict.get('Description')\n self.cross_stack_resources = cross_stack_resources or {}\n self.resource_map = self._create_resource_map()\n self.output_map = self._create_output_map()\n self._add_stack_event(\"CREATE_COMPLETE\")\n self.status = 'CREATE_COMPLETE'\n\n def _create_resource_map(self):\n resource_map = ResourceMap(\n self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict, self.cross_stack_resources)\n resource_map.create()\n return resource_map\n\n def _create_output_map(self):\n output_map = OutputMap(self.resource_map, self.template_dict, self.stack_id)\n output_map.create()\n return output_map\n\n def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=self.name,\n physical_resource_id=self.stack_id,\n resource_type=\"AWS::CloudFormation::Stack\",\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):\n # not used yet... 
feel free to help yourself\n resource = self.resource_map[logical_resource_id]\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=logical_resource_id,\n physical_resource_id=resource.physical_resource_id,\n resource_type=resource.type,\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n def _parse_template(self):\n yaml.add_multi_constructor('', yaml_tag_constructor)\n try:\n self.template_dict = yaml.load(self.template)\n except yaml.parser.ParserError:\n self.template_dict = json.loads(self.template)\n\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\n\n @property\n def stack_resources(self):\n return self.resource_map.values()\n\n @property\n def stack_outputs(self):\n return self.output_map.values()\n\n @property\n def exports(self):\n return self.output_map.exports\n\n def update(self, template, role_arn=None, parameters=None, tags=None):\n self._add_stack_event(\"UPDATE_IN_PROGRESS\", resource_status_reason=\"User Initiated\")\n self.template = template\n self._parse_template()\n self.resource_map.update(self.template_dict, parameters)\n self.output_map = self._create_output_map()\n self._add_stack_event(\"UPDATE_COMPLETE\")\n self.status = \"UPDATE_COMPLETE\"\n self.role_arn = role_arn\n # only overwrite tags if passed\n if tags is not None:\n self.tags = tags\n # TODO: update tags in the resource map\n\n def delete(self):\n self._add_stack_event(\"DELETE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n self.resource_map.delete()\n self._add_stack_event(\"DELETE_COMPLETE\")\n self.status = \"DELETE_COMPLETE\"\n\n\nclass FakeEvent(BaseModel):\n\n def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):\n self.stack_id = stack_id\n self.stack_name = stack_name\n self.logical_resource_id = logical_resource_id\n self.physical_resource_id = physical_resource_id\n self.resource_type = resource_type\n self.resource_status = resource_status\n self.resource_status_reason = resource_status_reason\n self.resource_properties = resource_properties\n self.timestamp = datetime.utcnow()\n self.event_id = uuid.uuid4()\n\n\nclass CloudFormationBackend(BaseBackend):\n\n def __init__(self):\n self.stacks = OrderedDict()\n self.deleted_stacks = {}\n self.exports = OrderedDict()\n self.change_sets = OrderedDict()\n\n def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):\n stack_id = generate_stack_id(name)\n new_stack = FakeStack(\n stack_id=stack_id,\n name=name,\n template=template,\n parameters=parameters,\n region_name=region_name,\n notification_arns=notification_arns,\n tags=tags,\n role_arn=role_arn,\n cross_stack_resources=self.exports,\n create_change_set=create_change_set,\n )\n self.stacks[stack_id] = new_stack\n self._validate_export_uniqueness(new_stack)\n for export in new_stack.exports:\n self.exports[export.name] = export\n return new_stack\n\n def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):\n if change_set_type == 'UPDATE':\n stacks = self.stacks.values()\n stack = None\n for s in stacks:\n if s.name == stack_name:\n stack = s\n if stack is None:\n raise ValidationError(stack_name)\n\n else:\n 
stack = self.create_stack(stack_name, template, parameters,\n region_name, notification_arns, tags,\n role_arn, create_change_set=True)\n change_set_id = generate_changeset_id(change_set_name, region_name)\n self.stacks[change_set_name] = {'Id': change_set_id,\n 'StackId': stack.stack_id}\n self.change_sets[change_set_id] = stack\n return change_set_id, stack.stack_id\n\n def execute_change_set(self, change_set_name, stack_name=None):\n stack = None\n if change_set_name in self.change_sets:\n # This means arn was passed in\n stack = self.change_sets[change_set_name]\n else:\n for cs in self.change_sets:\n if self.change_sets[cs].name == change_set_name:\n stack = self.change_sets[cs]\n if stack is None:\n raise ValidationError(stack_name)\n if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':\n stack._add_stack_event('CREATE_COMPLETE')\n else:\n stack._add_stack_event('UPDATE_IN_PROGRESS')\n stack._add_stack_event('UPDATE_COMPLETE')\n return True\n\n def describe_stacks(self, name_or_stack_id):\n stacks = self.stacks.values()\n if name_or_stack_id:\n for stack in stacks:\n if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:\n return [stack]\n if self.deleted_stacks:\n deleted_stacks = self.deleted_stacks.values()\n for stack in deleted_stacks:\n if stack.stack_id == name_or_stack_id:\n return [stack]\n raise ValidationError(name_or_stack_id)\n else:\n return list(stacks)\n\n def list_stacks(self):\n return self.stacks.values()\n\n def get_stack(self, name_or_stack_id):\n all_stacks = dict(self.deleted_stacks, **self.stacks)\n if name_or_stack_id in all_stacks:\n # Lookup by stack id - deleted stacks incldued\n return all_stacks[name_or_stack_id]\n else:\n # Lookup by stack name - undeleted stacks only\n for stack in self.stacks.values():\n if stack.name == name_or_stack_id:\n return stack\n\n def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):\n stack = self.get_stack(name)\n stack.update(template, role_arn, parameters=parameters, tags=tags)\n return stack\n\n def list_stack_resources(self, stack_name_or_id):\n stack = self.get_stack(stack_name_or_id)\n return stack.stack_resources\n\n def delete_stack(self, name_or_stack_id):\n if name_or_stack_id in self.stacks:\n # Delete by stack id\n stack = self.stacks.pop(name_or_stack_id, None)\n stack.delete()\n self.deleted_stacks[stack.stack_id] = stack\n [self.exports.pop(export.name) for export in stack.exports]\n return self.stacks.pop(name_or_stack_id, None)\n else:\n # Delete by stack name\n for stack in list(self.stacks.values()):\n if stack.name == name_or_stack_id:\n self.delete_stack(stack.stack_id)\n\n def list_exports(self, token):\n all_exports = list(self.exports.values())\n if token is None:\n exports = all_exports[0:100]\n next_token = '100' if len(all_exports) > 100 else None\n else:\n token = int(token)\n exports = all_exports[token:token + 100]\n next_token = str(token + 100) if len(all_exports) > token + 100 else None\n return exports, next_token\n\n def _validate_export_uniqueness(self, stack):\n new_stack_export_names = [x.name for x in stack.exports]\n export_names = self.exports.keys()\n if not set(export_names).isdisjoint(new_stack_export_names):\n raise ValidationError(stack.stack_id, message='Export names must be unique across a given region')\n\n\ncloudformation_backends = {}\nfor region in boto.cloudformation.regions():\n cloudformation_backends[region.name] = CloudFormationBackend()\n", "path": "moto/cloudformation/models.py"}], "after_files": [{"content": 
"from __future__ import unicode_literals\nfrom datetime import datetime\nimport json\nimport yaml\nimport uuid\n\nimport boto.cloudformation\nfrom moto.compat import OrderedDict\nfrom moto.core import BaseBackend, BaseModel\n\nfrom .parsing import ResourceMap, OutputMap\nfrom .utils import (\n generate_changeset_id,\n generate_stack_id,\n yaml_tag_constructor,\n)\nfrom .exceptions import ValidationError\n\n\nclass FakeStack(BaseModel):\n\n def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):\n self.stack_id = stack_id\n self.name = name\n self.template = template\n self._parse_template()\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n self.role_arn = role_arn\n self.tags = tags if tags else {}\n self.events = []\n if create_change_set:\n self._add_stack_event(\"REVIEW_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n else:\n self._add_stack_event(\"CREATE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n\n self.description = self.template_dict.get('Description')\n self.cross_stack_resources = cross_stack_resources or {}\n self.resource_map = self._create_resource_map()\n self.output_map = self._create_output_map()\n self._add_stack_event(\"CREATE_COMPLETE\")\n self.status = 'CREATE_COMPLETE'\n\n def _create_resource_map(self):\n resource_map = ResourceMap(\n self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict, self.cross_stack_resources)\n resource_map.create()\n return resource_map\n\n def _create_output_map(self):\n output_map = OutputMap(self.resource_map, self.template_dict, self.stack_id)\n output_map.create()\n return output_map\n\n def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=self.name,\n physical_resource_id=self.stack_id,\n resource_type=\"AWS::CloudFormation::Stack\",\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):\n # not used yet... 
feel free to help yourself\n resource = self.resource_map[logical_resource_id]\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=logical_resource_id,\n physical_resource_id=resource.physical_resource_id,\n resource_type=resource.type,\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n def _parse_template(self):\n yaml.add_multi_constructor('', yaml_tag_constructor)\n try:\n self.template_dict = yaml.load(self.template)\n except yaml.parser.ParserError:\n self.template_dict = json.loads(self.template)\n\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\n\n @property\n def stack_resources(self):\n return self.resource_map.values()\n\n @property\n def stack_outputs(self):\n return self.output_map.values()\n\n @property\n def exports(self):\n return self.output_map.exports\n\n def update(self, template, role_arn=None, parameters=None, tags=None):\n self._add_stack_event(\"UPDATE_IN_PROGRESS\", resource_status_reason=\"User Initiated\")\n self.template = template\n self._parse_template()\n self.resource_map.update(self.template_dict, parameters)\n self.output_map = self._create_output_map()\n self._add_stack_event(\"UPDATE_COMPLETE\")\n self.status = \"UPDATE_COMPLETE\"\n self.role_arn = role_arn\n # only overwrite tags if passed\n if tags is not None:\n self.tags = tags\n # TODO: update tags in the resource map\n\n def delete(self):\n self._add_stack_event(\"DELETE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n self.resource_map.delete()\n self._add_stack_event(\"DELETE_COMPLETE\")\n self.status = \"DELETE_COMPLETE\"\n\n\nclass FakeEvent(BaseModel):\n\n def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):\n self.stack_id = stack_id\n self.stack_name = stack_name\n self.logical_resource_id = logical_resource_id\n self.physical_resource_id = physical_resource_id\n self.resource_type = resource_type\n self.resource_status = resource_status\n self.resource_status_reason = resource_status_reason\n self.resource_properties = resource_properties\n self.timestamp = datetime.utcnow()\n self.event_id = uuid.uuid4()\n\n\nclass CloudFormationBackend(BaseBackend):\n\n def __init__(self):\n self.stacks = OrderedDict()\n self.deleted_stacks = {}\n self.exports = OrderedDict()\n self.change_sets = OrderedDict()\n\n def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):\n stack_id = generate_stack_id(name)\n new_stack = FakeStack(\n stack_id=stack_id,\n name=name,\n template=template,\n parameters=parameters,\n region_name=region_name,\n notification_arns=notification_arns,\n tags=tags,\n role_arn=role_arn,\n cross_stack_resources=self.exports,\n create_change_set=create_change_set,\n )\n self.stacks[stack_id] = new_stack\n self._validate_export_uniqueness(new_stack)\n for export in new_stack.exports:\n self.exports[export.name] = export\n return new_stack\n\n def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):\n if change_set_type == 'UPDATE':\n stacks = self.stacks.values()\n stack = None\n for s in stacks:\n if s.name == stack_name:\n stack = s\n if stack is None:\n raise ValidationError(stack_name)\n\n else:\n 
stack = self.create_stack(stack_name, template, parameters,\n region_name, notification_arns, tags,\n role_arn, create_change_set=True)\n change_set_id = generate_changeset_id(change_set_name, region_name)\n self.stacks[change_set_name] = {'Id': change_set_id,\n 'StackId': stack.stack_id}\n self.change_sets[change_set_id] = stack\n return change_set_id, stack.stack_id\n\n def execute_change_set(self, change_set_name, stack_name=None):\n stack = None\n if change_set_name in self.change_sets:\n # This means arn was passed in\n stack = self.change_sets[change_set_name]\n else:\n for cs in self.change_sets:\n if self.change_sets[cs].name == change_set_name:\n stack = self.change_sets[cs]\n if stack is None:\n raise ValidationError(stack_name)\n if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':\n stack._add_stack_event('CREATE_COMPLETE')\n else:\n stack._add_stack_event('UPDATE_IN_PROGRESS')\n stack._add_stack_event('UPDATE_COMPLETE')\n return True\n\n def describe_stacks(self, name_or_stack_id):\n stacks = self.stacks.values()\n if name_or_stack_id:\n for stack in stacks:\n if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:\n return [stack]\n if self.deleted_stacks:\n deleted_stacks = self.deleted_stacks.values()\n for stack in deleted_stacks:\n if stack.stack_id == name_or_stack_id:\n return [stack]\n raise ValidationError(name_or_stack_id)\n else:\n return list(stacks)\n\n def list_stacks(self):\n return [\n v for v in self.stacks.values()\n ] + [\n v for v in self.deleted_stacks.values()\n ]\n\n def get_stack(self, name_or_stack_id):\n all_stacks = dict(self.deleted_stacks, **self.stacks)\n if name_or_stack_id in all_stacks:\n # Lookup by stack id - deleted stacks incldued\n return all_stacks[name_or_stack_id]\n else:\n # Lookup by stack name - undeleted stacks only\n for stack in self.stacks.values():\n if stack.name == name_or_stack_id:\n return stack\n\n def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):\n stack = self.get_stack(name)\n stack.update(template, role_arn, parameters=parameters, tags=tags)\n return stack\n\n def list_stack_resources(self, stack_name_or_id):\n stack = self.get_stack(stack_name_or_id)\n return stack.stack_resources\n\n def delete_stack(self, name_or_stack_id):\n if name_or_stack_id in self.stacks:\n # Delete by stack id\n stack = self.stacks.pop(name_or_stack_id, None)\n stack.delete()\n self.deleted_stacks[stack.stack_id] = stack\n [self.exports.pop(export.name) for export in stack.exports]\n return self.stacks.pop(name_or_stack_id, None)\n else:\n # Delete by stack name\n for stack in list(self.stacks.values()):\n if stack.name == name_or_stack_id:\n self.delete_stack(stack.stack_id)\n\n def list_exports(self, token):\n all_exports = list(self.exports.values())\n if token is None:\n exports = all_exports[0:100]\n next_token = '100' if len(all_exports) > 100 else None\n else:\n token = int(token)\n exports = all_exports[token:token + 100]\n next_token = str(token + 100) if len(all_exports) > token + 100 else None\n return exports, next_token\n\n def _validate_export_uniqueness(self, stack):\n new_stack_export_names = [x.name for x in stack.exports]\n export_names = self.exports.keys()\n if not set(export_names).isdisjoint(new_stack_export_names):\n raise ValidationError(stack.stack_id, message='Export names must be unique across a given region')\n\n\ncloudformation_backends = {}\nfor region in boto.cloudformation.regions():\n cloudformation_backends[region.name] = CloudFormationBackend()\n", 
"path": "moto/cloudformation/models.py"}]} | 3,428 | 136 |
gh_patches_debug_33921 | rasdani/github-patches | git_diff | coqui-ai__TTS-1227 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ParallelWaveGAN config should be adjusted
Hi,
I have tried training with the current default config; it drops the learning rate too fast and the model converges to generating noise. This is because, when `scheduler_after_epoch=False`, `ExponentialLR` with `gamma=0.999` causes the learning rate to reach effectively zero within about 10k steps.
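As a rough back-of-the-envelope check (my own numbers, using the default `lr_gen=0.0002` from the current config, not figures from the paper):
```
# scheduler_after_epoch=False means the scheduler steps every iteration,
# so the learning-rate multiplier after N steps is gamma ** N.
gamma = 0.999
print(0.0002 * gamma ** 10_000)  # ~9e-09, i.e. effectively zero
```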
The config values from the original paper are:
```
batch_size=8,
stft_loss_weight=1.0,
mse_G_loss_weight=4.0,
steps_to_start_discriminator=100000,
lr_gen=0.0001,
lr_disc=0.00005,
lr_scheduler_gen="StepLR",
lr_scheduler_gen_params={"gamma": 0.5, "step_size": 200000, "last_epoch": -1},
lr_scheduler_disc="StepLR",
lr_scheduler_disc_params={"gamma": 0.5, "step_size": 200000, "last_epoch": -1},
scheduler_after_epoch=False,
```
It is also possible to reproduce the same schedule with `ExponentialLR`, up to a small floating-point rounding error:
```
lr_scheduler_gen="ExponentialLR", # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
lr_scheduler_gen_params={"gamma": 0.5**(1/200000), "last_epoch": -1},
lr_scheduler_disc="ExponentialLR", # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
lr_scheduler_disc_params={"gamma": 0.5**(1/200000), "last_epoch": -1},
```
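A quick numerical sanity check of that equivalence (my own illustration, not part of the original report):
```
gamma = 0.5 ** (1 / 200_000)
print(gamma)             # ~0.9999965
print(gamma ** 200_000)  # ~0.5, i.e. one StepLR halving every 200k steps
```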
With more GPU memory the batch_size can be increased and steps reduced.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `TTS/vocoder/configs/parallel_wavegan_config.py`
Content:
```
1 from dataclasses import dataclass, field
2
3 from .shared_configs import BaseGANVocoderConfig
4
5
6 @dataclass
7 class ParallelWaveganConfig(BaseGANVocoderConfig):
8 """Defines parameters for ParallelWavegan vocoder.
9
10 Args:
11 model (str):
12 Model name used for selecting the right configuration at initialization. Defaults to `gan`.
13 discriminator_model (str): One of the discriminators from `TTS.vocoder.models.*_discriminator`. Defaults to
14 'parallel_wavegan_discriminator`.
15 discriminator_model_params (dict): The discriminator model kwargs. Defaults to
16 '{"num_layers": 10}`
17 generator_model (str): One of the generators from TTS.vocoder.models.*`. Every other non-GAN vocoder model is
18 considered as a generator too. Defaults to `parallel_wavegan_generator`.
19 generator_model_param (dict):
20 The generator model kwargs. Defaults to `{"upsample_factors": [4, 4, 4, 4], "stacks": 3, "num_res_blocks": 30}`.
21 batch_size (int):
22 Batch size used at training. Larger values use more memory. Defaults to 16.
23 seq_len (int):
24 Audio segment length used at training. Larger values use more memory. Defaults to 8192.
25 pad_short (int):
26 Additional padding applied to the audio samples shorter than `seq_len`. Defaults to 0.
27 use_noise_augment (bool):
28 enable / disable random noise added to the input waveform. The noise is added after computing the
29 features. Defaults to True.
30 use_cache (bool):
31 enable / disable in memory caching of the computed features. It can cause OOM error if the system RAM is
32 not large enough. Defaults to True.
33 steps_to_start_discriminator (int):
34 Number of steps required to start training the discriminator. Defaults to 0.
35 use_stft_loss (bool):`
36 enable / disable use of STFT loss originally used by ParallelWaveGAN model. Defaults to True.
37 use_subband_stft (bool):
38 enable / disable use of subband loss computation originally used by MultiBandMelgan model. Defaults to True.
39 use_mse_gan_loss (bool):
40 enable / disable using Mean Squeare Error GAN loss. Defaults to True.
41 use_hinge_gan_loss (bool):
42 enable / disable using Hinge GAN loss. You should choose either Hinge or MSE loss for training GAN models.
43 Defaults to False.
44 use_feat_match_loss (bool):
45 enable / disable using Feature Matching loss originally used by MelGAN model. Defaults to True.
46 use_l1_spec_loss (bool):
47 enable / disable using L1 spectrogram loss originally used by HifiGAN model. Defaults to False.
48 stft_loss_params (dict): STFT loss parameters. Default to
49 `{"n_ffts": [1024, 2048, 512], "hop_lengths": [120, 240, 50], "win_lengths": [600, 1200, 240]}`
50 stft_loss_weight (float): STFT loss weight that multiplies the computed loss before summing up the total
51 model loss. Defaults to 0.5.
52 subband_stft_loss_weight (float):
53 Subband STFT loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.
54 mse_G_loss_weight (float):
55 MSE generator loss weight that multiplies the computed loss before summing up the total loss. faults to 2.5.
56 hinge_G_loss_weight (float):
57 Hinge generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.
58 feat_match_loss_weight (float):
59 Feature matching loss weight that multiplies the computed loss before summing up the total loss. faults to 0.
60 l1_spec_loss_weight (float):
61 L1 spectrogram loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.
62 lr_gen (float):
63 Generator model initial learning rate. Defaults to 0.0002.
64 lr_disc (float):
65 Discriminator model initial learning rate. Defaults to 0.0002.
66 optimizer (torch.optim.Optimizer):
67 Optimizer used for the training. Defaults to `AdamW`.
68 optimizer_params (dict):
69 Optimizer kwargs. Defaults to `{"betas": [0.8, 0.99], "weight_decay": 0.0}`
70 lr_scheduler_gen (torch.optim.Scheduler):
71 Learning rate scheduler for the generator. Defaults to `ExponentialLR`.
72 lr_scheduler_gen_params (dict):
73 Parameters for the generator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`.
74 lr_scheduler_disc (torch.optim.Scheduler):
75 Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.
76 lr_scheduler_dict_params (dict):
77 Parameters for the discriminator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`.
78 """
79
80 model: str = "parallel_wavegan"
81
82 # Model specific params
83 discriminator_model: str = "parallel_wavegan_discriminator"
84 discriminator_model_params: dict = field(default_factory=lambda: {"num_layers": 10})
85 generator_model: str = "parallel_wavegan_generator"
86 generator_model_params: dict = field(
87 default_factory=lambda: {"upsample_factors": [4, 4, 4, 4], "stacks": 3, "num_res_blocks": 30}
88 )
89
90 # Training - overrides
91 batch_size: int = 6
92 seq_len: int = 25600
93 pad_short: int = 2000
94 use_noise_augment: bool = False
95 use_cache: bool = True
96 steps_to_start_discriminator: int = 200000
97
98 # LOSS PARAMETERS - overrides
99 use_stft_loss: bool = True
100 use_subband_stft_loss: bool = False
101 use_mse_gan_loss: bool = True
102 use_hinge_gan_loss: bool = False
103 use_feat_match_loss: bool = False # requires MelGAN Discriminators (MelGAN and HifiGAN)
104 use_l1_spec_loss: bool = False
105
106 stft_loss_params: dict = field(
107 default_factory=lambda: {
108 "n_ffts": [1024, 2048, 512],
109 "hop_lengths": [120, 240, 50],
110 "win_lengths": [600, 1200, 240],
111 }
112 )
113
114 # loss weights - overrides
115 stft_loss_weight: float = 0.5
116 subband_stft_loss_weight: float = 0
117 mse_G_loss_weight: float = 2.5
118 hinge_G_loss_weight: float = 0
119 feat_match_loss_weight: float = 0
120 l1_spec_loss_weight: float = 0
121
122 # optimizer overrides
123 lr_gen: float = 0.0002 # Initial learning rate.
124 lr_disc: float = 0.0002 # Initial learning rate.
125 optimizer: str = "AdamW"
126 optimizer_params: dict = field(default_factory=lambda: {"betas": [0.8, 0.99], "weight_decay": 0.0})
127 lr_scheduler_gen: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
128 lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
129 lr_scheduler_disc: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
130 lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/TTS/vocoder/configs/parallel_wavegan_config.py b/TTS/vocoder/configs/parallel_wavegan_config.py
--- a/TTS/vocoder/configs/parallel_wavegan_config.py
+++ b/TTS/vocoder/configs/parallel_wavegan_config.py
@@ -70,11 +70,11 @@
lr_scheduler_gen (torch.optim.Scheduler):
Learning rate scheduler for the generator. Defaults to `ExponentialLR`.
lr_scheduler_gen_params (dict):
- Parameters for the generator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`.
+ Parameters for the generator learning rate scheduler. Defaults to `{"gamma": 0.5, "step_size": 200000, "last_epoch": -1}`.
lr_scheduler_disc (torch.optim.Scheduler):
Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.
lr_scheduler_dict_params (dict):
- Parameters for the discriminator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`.
+ Parameters for the discriminator learning rate scheduler. Defaults to `{"gamma": 0.5, "step_size": 200000, "last_epoch": -1}`.
"""
model: str = "parallel_wavegan"
@@ -124,7 +124,8 @@
lr_disc: float = 0.0002 # Initial learning rate.
optimizer: str = "AdamW"
optimizer_params: dict = field(default_factory=lambda: {"betas": [0.8, 0.99], "weight_decay": 0.0})
- lr_scheduler_gen: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
- lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
- lr_scheduler_disc: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
- lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
+ lr_scheduler_gen: str = "StepLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
+ lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.5, "step_size": 200000, "last_epoch": -1})
+ lr_scheduler_disc: str = "StepLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
+ lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.5, "step_size": 200000, "last_epoch": -1})
+ scheduler_after_epoch: bool = False
| {"golden_diff": "diff --git a/TTS/vocoder/configs/parallel_wavegan_config.py b/TTS/vocoder/configs/parallel_wavegan_config.py\n--- a/TTS/vocoder/configs/parallel_wavegan_config.py\n+++ b/TTS/vocoder/configs/parallel_wavegan_config.py\n@@ -70,11 +70,11 @@\n lr_scheduler_gen (torch.optim.Scheduler):\n Learning rate scheduler for the generator. Defaults to `ExponentialLR`.\n lr_scheduler_gen_params (dict):\n- Parameters for the generator learning rate scheduler. Defaults to `{\"gamma\": 0.999, \"last_epoch\": -1}`.\n+ Parameters for the generator learning rate scheduler. Defaults to `{\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1}`.\n lr_scheduler_disc (torch.optim.Scheduler):\n Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.\n lr_scheduler_dict_params (dict):\n- Parameters for the discriminator learning rate scheduler. Defaults to `{\"gamma\": 0.999, \"last_epoch\": -1}`.\n+ Parameters for the discriminator learning rate scheduler. Defaults to `{\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1}`.\n \"\"\"\n \n model: str = \"parallel_wavegan\"\n@@ -124,7 +124,8 @@\n lr_disc: float = 0.0002 # Initial learning rate.\n optimizer: str = \"AdamW\"\n optimizer_params: dict = field(default_factory=lambda: {\"betas\": [0.8, 0.99], \"weight_decay\": 0.0})\n- lr_scheduler_gen: str = \"ExponentialLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n- lr_scheduler_gen_params: dict = field(default_factory=lambda: {\"gamma\": 0.999, \"last_epoch\": -1})\n- lr_scheduler_disc: str = \"ExponentialLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n- lr_scheduler_disc_params: dict = field(default_factory=lambda: {\"gamma\": 0.999, \"last_epoch\": -1})\n+ lr_scheduler_gen: str = \"StepLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n+ lr_scheduler_gen_params: dict = field(default_factory=lambda: {\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1})\n+ lr_scheduler_disc: str = \"StepLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n+ lr_scheduler_disc_params: dict = field(default_factory=lambda: {\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1})\n+ scheduler_after_epoch: bool = False\n", "issue": "ParallelWaveGAN config should be adjusted\nHi,\r\n\r\nI have tried training with the current default config and it drops the learning rate too fast and the model converges to generate noise. 
Because when `scheduler_after_epoch=False`, `ExponentialLR` with `gamma=0.999` will cause the learning rate to reach 0.000 in 10k steps.\r\n\r\nConfig from the original paper are:\r\n ```\r\n batch_size=8,\r\n stft_loss_weight=1.0,\r\n mse_G_loss_weight=4.0,\r\n steps_to_start_discriminator=100000,\r\n lr_gen=0.0001,\r\n lr_disc=0.00005,\r\n lr_scheduler_gen=\"StepLR\",\r\n lr_scheduler_gen_params={\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1},\r\n lr_scheduler_disc=\"StepLR\",\r\n lr_scheduler_disc_params={\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1},\r\n scheduler_after_epoch=False,\r\n```\r\n\r\nIt is also possible to use ExponentialLR with some float rounding error:\r\n```\r\n lr_scheduler_gen=\"ExponentialLR\", # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\r\n lr_scheduler_gen_params={\"gamma\": 0.5**(1/200000), \"last_epoch\": -1},\r\n lr_scheduler_disc=\"ExponentialLR\", # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\r\n lr_scheduler_disc_params={\"gamma\": 0.5**(1/200000), \"last_epoch\": -1},\r\n```\r\n\r\nWith more GPU memory the batch_size can be increased and steps reduced.\n", "before_files": [{"content": "from dataclasses import dataclass, field\n\nfrom .shared_configs import BaseGANVocoderConfig\n\n\n@dataclass\nclass ParallelWaveganConfig(BaseGANVocoderConfig):\n \"\"\"Defines parameters for ParallelWavegan vocoder.\n\n Args:\n model (str):\n Model name used for selecting the right configuration at initialization. Defaults to `gan`.\n discriminator_model (str): One of the discriminators from `TTS.vocoder.models.*_discriminator`. Defaults to\n 'parallel_wavegan_discriminator`.\n discriminator_model_params (dict): The discriminator model kwargs. Defaults to\n '{\"num_layers\": 10}`\n generator_model (str): One of the generators from TTS.vocoder.models.*`. Every other non-GAN vocoder model is\n considered as a generator too. Defaults to `parallel_wavegan_generator`.\n generator_model_param (dict):\n The generator model kwargs. Defaults to `{\"upsample_factors\": [4, 4, 4, 4], \"stacks\": 3, \"num_res_blocks\": 30}`.\n batch_size (int):\n Batch size used at training. Larger values use more memory. Defaults to 16.\n seq_len (int):\n Audio segment length used at training. Larger values use more memory. Defaults to 8192.\n pad_short (int):\n Additional padding applied to the audio samples shorter than `seq_len`. Defaults to 0.\n use_noise_augment (bool):\n enable / disable random noise added to the input waveform. The noise is added after computing the\n features. Defaults to True.\n use_cache (bool):\n enable / disable in memory caching of the computed features. It can cause OOM error if the system RAM is\n not large enough. Defaults to True.\n steps_to_start_discriminator (int):\n Number of steps required to start training the discriminator. Defaults to 0.\n use_stft_loss (bool):`\n enable / disable use of STFT loss originally used by ParallelWaveGAN model. Defaults to True.\n use_subband_stft (bool):\n enable / disable use of subband loss computation originally used by MultiBandMelgan model. Defaults to True.\n use_mse_gan_loss (bool):\n enable / disable using Mean Squeare Error GAN loss. Defaults to True.\n use_hinge_gan_loss (bool):\n enable / disable using Hinge GAN loss. You should choose either Hinge or MSE loss for training GAN models.\n Defaults to False.\n use_feat_match_loss (bool):\n enable / disable using Feature Matching loss originally used by MelGAN model. 
Defaults to True.\n use_l1_spec_loss (bool):\n enable / disable using L1 spectrogram loss originally used by HifiGAN model. Defaults to False.\n stft_loss_params (dict): STFT loss parameters. Default to\n `{\"n_ffts\": [1024, 2048, 512], \"hop_lengths\": [120, 240, 50], \"win_lengths\": [600, 1200, 240]}`\n stft_loss_weight (float): STFT loss weight that multiplies the computed loss before summing up the total\n model loss. Defaults to 0.5.\n subband_stft_loss_weight (float):\n Subband STFT loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n mse_G_loss_weight (float):\n MSE generator loss weight that multiplies the computed loss before summing up the total loss. faults to 2.5.\n hinge_G_loss_weight (float):\n Hinge generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n feat_match_loss_weight (float):\n Feature matching loss weight that multiplies the computed loss before summing up the total loss. faults to 0.\n l1_spec_loss_weight (float):\n L1 spectrogram loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n lr_gen (float):\n Generator model initial learning rate. Defaults to 0.0002.\n lr_disc (float):\n Discriminator model initial learning rate. Defaults to 0.0002.\n optimizer (torch.optim.Optimizer):\n Optimizer used for the training. Defaults to `AdamW`.\n optimizer_params (dict):\n Optimizer kwargs. Defaults to `{\"betas\": [0.8, 0.99], \"weight_decay\": 0.0}`\n lr_scheduler_gen (torch.optim.Scheduler):\n Learning rate scheduler for the generator. Defaults to `ExponentialLR`.\n lr_scheduler_gen_params (dict):\n Parameters for the generator learning rate scheduler. Defaults to `{\"gamma\": 0.999, \"last_epoch\": -1}`.\n lr_scheduler_disc (torch.optim.Scheduler):\n Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.\n lr_scheduler_dict_params (dict):\n Parameters for the discriminator learning rate scheduler. 
Defaults to `{\"gamma\": 0.999, \"last_epoch\": -1}`.\n \"\"\"\n\n model: str = \"parallel_wavegan\"\n\n # Model specific params\n discriminator_model: str = \"parallel_wavegan_discriminator\"\n discriminator_model_params: dict = field(default_factory=lambda: {\"num_layers\": 10})\n generator_model: str = \"parallel_wavegan_generator\"\n generator_model_params: dict = field(\n default_factory=lambda: {\"upsample_factors\": [4, 4, 4, 4], \"stacks\": 3, \"num_res_blocks\": 30}\n )\n\n # Training - overrides\n batch_size: int = 6\n seq_len: int = 25600\n pad_short: int = 2000\n use_noise_augment: bool = False\n use_cache: bool = True\n steps_to_start_discriminator: int = 200000\n\n # LOSS PARAMETERS - overrides\n use_stft_loss: bool = True\n use_subband_stft_loss: bool = False\n use_mse_gan_loss: bool = True\n use_hinge_gan_loss: bool = False\n use_feat_match_loss: bool = False # requires MelGAN Discriminators (MelGAN and HifiGAN)\n use_l1_spec_loss: bool = False\n\n stft_loss_params: dict = field(\n default_factory=lambda: {\n \"n_ffts\": [1024, 2048, 512],\n \"hop_lengths\": [120, 240, 50],\n \"win_lengths\": [600, 1200, 240],\n }\n )\n\n # loss weights - overrides\n stft_loss_weight: float = 0.5\n subband_stft_loss_weight: float = 0\n mse_G_loss_weight: float = 2.5\n hinge_G_loss_weight: float = 0\n feat_match_loss_weight: float = 0\n l1_spec_loss_weight: float = 0\n\n # optimizer overrides\n lr_gen: float = 0.0002 # Initial learning rate.\n lr_disc: float = 0.0002 # Initial learning rate.\n optimizer: str = \"AdamW\"\n optimizer_params: dict = field(default_factory=lambda: {\"betas\": [0.8, 0.99], \"weight_decay\": 0.0})\n lr_scheduler_gen: str = \"ExponentialLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n lr_scheduler_gen_params: dict = field(default_factory=lambda: {\"gamma\": 0.999, \"last_epoch\": -1})\n lr_scheduler_disc: str = \"ExponentialLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n lr_scheduler_disc_params: dict = field(default_factory=lambda: {\"gamma\": 0.999, \"last_epoch\": -1})\n", "path": "TTS/vocoder/configs/parallel_wavegan_config.py"}], "after_files": [{"content": "from dataclasses import dataclass, field\n\nfrom .shared_configs import BaseGANVocoderConfig\n\n\n@dataclass\nclass ParallelWaveganConfig(BaseGANVocoderConfig):\n \"\"\"Defines parameters for ParallelWavegan vocoder.\n\n Args:\n model (str):\n Model name used for selecting the right configuration at initialization. Defaults to `gan`.\n discriminator_model (str): One of the discriminators from `TTS.vocoder.models.*_discriminator`. Defaults to\n 'parallel_wavegan_discriminator`.\n discriminator_model_params (dict): The discriminator model kwargs. Defaults to\n '{\"num_layers\": 10}`\n generator_model (str): One of the generators from TTS.vocoder.models.*`. Every other non-GAN vocoder model is\n considered as a generator too. Defaults to `parallel_wavegan_generator`.\n generator_model_param (dict):\n The generator model kwargs. Defaults to `{\"upsample_factors\": [4, 4, 4, 4], \"stacks\": 3, \"num_res_blocks\": 30}`.\n batch_size (int):\n Batch size used at training. Larger values use more memory. Defaults to 16.\n seq_len (int):\n Audio segment length used at training. Larger values use more memory. Defaults to 8192.\n pad_short (int):\n Additional padding applied to the audio samples shorter than `seq_len`. Defaults to 0.\n use_noise_augment (bool):\n enable / disable random noise added to the input waveform. 
The noise is added after computing the\n features. Defaults to True.\n use_cache (bool):\n enable / disable in memory caching of the computed features. It can cause OOM error if the system RAM is\n not large enough. Defaults to True.\n steps_to_start_discriminator (int):\n Number of steps required to start training the discriminator. Defaults to 0.\n use_stft_loss (bool):`\n enable / disable use of STFT loss originally used by ParallelWaveGAN model. Defaults to True.\n use_subband_stft (bool):\n enable / disable use of subband loss computation originally used by MultiBandMelgan model. Defaults to True.\n use_mse_gan_loss (bool):\n enable / disable using Mean Squeare Error GAN loss. Defaults to True.\n use_hinge_gan_loss (bool):\n enable / disable using Hinge GAN loss. You should choose either Hinge or MSE loss for training GAN models.\n Defaults to False.\n use_feat_match_loss (bool):\n enable / disable using Feature Matching loss originally used by MelGAN model. Defaults to True.\n use_l1_spec_loss (bool):\n enable / disable using L1 spectrogram loss originally used by HifiGAN model. Defaults to False.\n stft_loss_params (dict): STFT loss parameters. Default to\n `{\"n_ffts\": [1024, 2048, 512], \"hop_lengths\": [120, 240, 50], \"win_lengths\": [600, 1200, 240]}`\n stft_loss_weight (float): STFT loss weight that multiplies the computed loss before summing up the total\n model loss. Defaults to 0.5.\n subband_stft_loss_weight (float):\n Subband STFT loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n mse_G_loss_weight (float):\n MSE generator loss weight that multiplies the computed loss before summing up the total loss. faults to 2.5.\n hinge_G_loss_weight (float):\n Hinge generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n feat_match_loss_weight (float):\n Feature matching loss weight that multiplies the computed loss before summing up the total loss. faults to 0.\n l1_spec_loss_weight (float):\n L1 spectrogram loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n lr_gen (float):\n Generator model initial learning rate. Defaults to 0.0002.\n lr_disc (float):\n Discriminator model initial learning rate. Defaults to 0.0002.\n optimizer (torch.optim.Optimizer):\n Optimizer used for the training. Defaults to `AdamW`.\n optimizer_params (dict):\n Optimizer kwargs. Defaults to `{\"betas\": [0.8, 0.99], \"weight_decay\": 0.0}`\n lr_scheduler_gen (torch.optim.Scheduler):\n Learning rate scheduler for the generator. Defaults to `ExponentialLR`.\n lr_scheduler_gen_params (dict):\n Parameters for the generator learning rate scheduler. Defaults to `{\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1}`.\n lr_scheduler_disc (torch.optim.Scheduler):\n Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.\n lr_scheduler_dict_params (dict):\n Parameters for the discriminator learning rate scheduler. 
Defaults to `{\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1}`.\n \"\"\"\n\n model: str = \"parallel_wavegan\"\n\n # Model specific params\n discriminator_model: str = \"parallel_wavegan_discriminator\"\n discriminator_model_params: dict = field(default_factory=lambda: {\"num_layers\": 10})\n generator_model: str = \"parallel_wavegan_generator\"\n generator_model_params: dict = field(\n default_factory=lambda: {\"upsample_factors\": [4, 4, 4, 4], \"stacks\": 3, \"num_res_blocks\": 30}\n )\n\n # Training - overrides\n batch_size: int = 6\n seq_len: int = 25600\n pad_short: int = 2000\n use_noise_augment: bool = False\n use_cache: bool = True\n steps_to_start_discriminator: int = 200000\n\n # LOSS PARAMETERS - overrides\n use_stft_loss: bool = True\n use_subband_stft_loss: bool = False\n use_mse_gan_loss: bool = True\n use_hinge_gan_loss: bool = False\n use_feat_match_loss: bool = False # requires MelGAN Discriminators (MelGAN and HifiGAN)\n use_l1_spec_loss: bool = False\n\n stft_loss_params: dict = field(\n default_factory=lambda: {\n \"n_ffts\": [1024, 2048, 512],\n \"hop_lengths\": [120, 240, 50],\n \"win_lengths\": [600, 1200, 240],\n }\n )\n\n # loss weights - overrides\n stft_loss_weight: float = 0.5\n subband_stft_loss_weight: float = 0\n mse_G_loss_weight: float = 2.5\n hinge_G_loss_weight: float = 0\n feat_match_loss_weight: float = 0\n l1_spec_loss_weight: float = 0\n\n # optimizer overrides\n lr_gen: float = 0.0002 # Initial learning rate.\n lr_disc: float = 0.0002 # Initial learning rate.\n optimizer: str = \"AdamW\"\n optimizer_params: dict = field(default_factory=lambda: {\"betas\": [0.8, 0.99], \"weight_decay\": 0.0})\n lr_scheduler_gen: str = \"StepLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n lr_scheduler_gen_params: dict = field(default_factory=lambda: {\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1})\n lr_scheduler_disc: str = \"StepLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n lr_scheduler_disc_params: dict = field(default_factory=lambda: {\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1})\n scheduler_after_epoch: bool = False\n", "path": "TTS/vocoder/configs/parallel_wavegan_config.py"}]} | 2,707 | 658 |
gh_patches_debug_49498 | rasdani/github-patches | git_diff | pex-tool__pex-1516 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.55
On the docket:
+ [x] Add official support for Python 3.10 (#1512)
+ [x] Always register global options. (#1511)
+ [x] Fix RTD generation by pinning docutils low. (#1509)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.54"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.54"
+__version__ = "2.1.55"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.54\"\n+__version__ = \"2.1.55\"\n", "issue": "Release 2.1.55\nOn the docket:\r\n+ [x] Add official support for Python 3.10 (#1512)\r\n+ [x] Always register global options. (#1511)\r\n+ [x] Fix RTD generation by pinning docutils low. (#1509)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.54\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.55\"\n", "path": "pex/version.py"}]} | 377 | 96 |
gh_patches_debug_16916 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-452 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC] Clarify original-dataframe mutation behavior in pyjanitor function docstrings
# Brief Description of Fix
Currently, some pyjanitor functions mutate the original dataframe and others return a copy. Solutions are heavily discussed in #79 but no conclusion has been reached. At the moment, it is unclear, without experimentation from the user, which behavior applies in each function.
In the interim, I propose to explicitly clarify this behavior in each function's docstring so the user has a clear idea regarding the function's mutating behavior. Below is a sample of what this could look like for `.clean_names()`:
"""
Clean column names.
Takes all column names, converts them to lowercase, then replaces all
spaces with underscores. <b>Does not mutate original dataframe.</b>
"""
Happy to add this line somewhere else in the docstring if inappropriate here.
- [Link to documentation page](https://pyjanitor.readthedocs.io/reference/index.html)
- [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/blob/dev/janitor/functions.py)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `janitor/biology.py`
Content:
```
1 """
2 Biology and bioinformatics-oriented data cleaning functions.
3 """
4
5 import pandas as pd
6 import pandas_flavor as pf
7
8 from .utils import deprecated_alias, import_message
9
10 try:
11 from Bio import SeqIO
12 except ImportError:
13 import_message(
14 "biology", "biopython", "conda install -c conda-forge biopython"
15 )
16
17
18 @pf.register_dataframe_method
19 @deprecated_alias(col_name="column_name")
20 def join_fasta(
21 df: pd.DataFrame, filename: str, id_col: str, column_name
22 ) -> pd.DataFrame:
23 """
24 Convenience method to join in a FASTA file as a column.
25
26 This allows us to add the string sequence of a FASTA file as a new column
27 of data in the dataframe.
28
29 This function only attaches the string representation of the SeqRecord.Seq
30 object from Biopython. Does not attach the full SeqRecord. Alphabet is
31 also not stored, under the assumption that the data scientist has domain
32 knowledge of what kind of sequence is being read in (nucleotide vs. amino
33 acid.)
34
35 For more advanced functions, please use phylopandas.
36
37 :param df: A pandas DataFrame.
38 :param filename: Path to the FASTA file.
39 :param id_col: The column in the DataFrame that houses sequence IDs.
40 :param column_name: The name of the new column.
41 """
42 seqrecords = {
43 x.id: x.seq.__str__() for x in SeqIO.parse(filename, "fasta")
44 }
45 seq_col = [seqrecords[i] for i in df[id_col]]
46 df[column_name] = seq_col
47 return df
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/janitor/biology.py b/janitor/biology.py
--- a/janitor/biology.py
+++ b/janitor/biology.py
@@ -26,12 +26,14 @@
This allows us to add the string sequence of a FASTA file as a new column
of data in the dataframe.
- This function only attaches the string representation of the SeqRecord.Seq
+ This method only attaches the string representation of the SeqRecord.Seq
object from Biopython. Does not attach the full SeqRecord. Alphabet is
also not stored, under the assumption that the data scientist has domain
knowledge of what kind of sequence is being read in (nucleotide vs. amino
acid.)
+ This method mutates the original DataFrame.
+
For more advanced functions, please use phylopandas.
:param df: A pandas DataFrame.
| {"golden_diff": "diff --git a/janitor/biology.py b/janitor/biology.py\n--- a/janitor/biology.py\n+++ b/janitor/biology.py\n@@ -26,12 +26,14 @@\n This allows us to add the string sequence of a FASTA file as a new column\n of data in the dataframe.\n \n- This function only attaches the string representation of the SeqRecord.Seq\n+ This method only attaches the string representation of the SeqRecord.Seq\n object from Biopython. Does not attach the full SeqRecord. Alphabet is\n also not stored, under the assumption that the data scientist has domain\n knowledge of what kind of sequence is being read in (nucleotide vs. amino\n acid.)\n \n+ This method mutates the original DataFrame.\n+\n For more advanced functions, please use phylopandas.\n \n :param df: A pandas DataFrame.\n", "issue": "[DOC] Clarify original-dataframe mutation behavior in pyjanitor function docstrings\n# Brief Description of Fix\r\nCurrently, some pyjanitor functions mutate the original dataframe and others return a copy. Solutions are heavily discussed in #79 but no conclusion has been reached. At the moment, it is unclear, without experimentation from the user, which behavior applies in each function. \r\n\r\nIn the interim, I propose to explicitly clarify this behavior in each function's docstring so the user has a clear idea regarding the function's mutating behavior. Below is a sample of what this could look like for `.clean_names()`:\r\n\r\n\"\"\"\r\nClean column names.\r\n Takes all column names, converts them to lowercase, then replaces all\r\n spaces with underscores. <b>Does not mutate original dataframe.</b>\r\n\"\"\"\r\n\r\nHappy to add this line somewhere else in the docstring if inappropriate here. \r\n\r\n- [Link to documentation page](https://pyjanitor.readthedocs.io/reference/index.html)\r\n- [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/blob/dev/janitor/functions.py)\r\n\n", "before_files": [{"content": "\"\"\"\nBiology and bioinformatics-oriented data cleaning functions.\n\"\"\"\n\nimport pandas as pd\nimport pandas_flavor as pf\n\nfrom .utils import deprecated_alias, import_message\n\ntry:\n from Bio import SeqIO\nexcept ImportError:\n import_message(\n \"biology\", \"biopython\", \"conda install -c conda-forge biopython\"\n )\n\n\[email protected]_dataframe_method\n@deprecated_alias(col_name=\"column_name\")\ndef join_fasta(\n df: pd.DataFrame, filename: str, id_col: str, column_name\n) -> pd.DataFrame:\n \"\"\"\n Convenience method to join in a FASTA file as a column.\n\n This allows us to add the string sequence of a FASTA file as a new column\n of data in the dataframe.\n\n This function only attaches the string representation of the SeqRecord.Seq\n object from Biopython. Does not attach the full SeqRecord. Alphabet is\n also not stored, under the assumption that the data scientist has domain\n knowledge of what kind of sequence is being read in (nucleotide vs. 
amino\n acid.)\n\n For more advanced functions, please use phylopandas.\n\n :param df: A pandas DataFrame.\n :param filename: Path to the FASTA file.\n :param id_col: The column in the DataFrame that houses sequence IDs.\n :param column_name: The name of the new column.\n \"\"\"\n seqrecords = {\n x.id: x.seq.__str__() for x in SeqIO.parse(filename, \"fasta\")\n }\n seq_col = [seqrecords[i] for i in df[id_col]]\n df[column_name] = seq_col\n return df\n", "path": "janitor/biology.py"}], "after_files": [{"content": "\"\"\"\nBiology and bioinformatics-oriented data cleaning functions.\n\"\"\"\n\nimport pandas as pd\nimport pandas_flavor as pf\n\nfrom .utils import deprecated_alias, import_message\n\ntry:\n from Bio import SeqIO\nexcept ImportError:\n import_message(\n \"biology\", \"biopython\", \"conda install -c conda-forge biopython\"\n )\n\n\[email protected]_dataframe_method\n@deprecated_alias(col_name=\"column_name\")\ndef join_fasta(\n df: pd.DataFrame, filename: str, id_col: str, column_name\n) -> pd.DataFrame:\n \"\"\"\n Convenience method to join in a FASTA file as a column.\n\n This allows us to add the string sequence of a FASTA file as a new column\n of data in the dataframe.\n\n This method only attaches the string representation of the SeqRecord.Seq\n object from Biopython. Does not attach the full SeqRecord. Alphabet is\n also not stored, under the assumption that the data scientist has domain\n knowledge of what kind of sequence is being read in (nucleotide vs. amino\n acid.)\n\n This method mutates the original DataFrame.\n\n For more advanced functions, please use phylopandas.\n\n :param df: A pandas DataFrame.\n :param filename: Path to the FASTA file.\n :param id_col: The column in the DataFrame that houses sequence IDs.\n :param column_name: The name of the new column.\n \"\"\"\n seqrecords = {\n x.id: x.seq.__str__() for x in SeqIO.parse(filename, \"fasta\")\n }\n seq_col = [seqrecords[i] for i in df[id_col]]\n df[column_name] = seq_col\n return df\n", "path": "janitor/biology.py"}]} | 932 | 198 |
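Editor's note on the record above: the accepted patch only documents that `join_fasta` mutates its input DataFrame. The issue also weighs the alternative of returning a copy; a minimal sketch of that non-mutating pattern is shown below. The function name and the plain-dict stand-in for FASTA parsing are assumptions for illustration, not part of pyjanitor.

```python
import pandas as pd


def join_fasta_non_mutating(df: pd.DataFrame, seqrecords: dict, id_col: str, column_name: str) -> pd.DataFrame:
    """Attach sequences as a new column without touching the caller's dataframe."""
    result = df.copy()  # work on a copy instead of writing into df
    result[column_name] = [seqrecords[i] for i in result[id_col]]
    return result


df = pd.DataFrame({"seq_id": ["a", "b"]})
out = join_fasta_non_mutating(df, {"a": "ATG", "b": "GGC"}, "seq_id", "sequence")
assert "sequence" in out.columns and "sequence" not in df.columns  # original left unchanged
```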
gh_patches_debug_25388 | rasdani/github-patches | git_diff | internetarchive__openlibrary-6283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix BWB Importbot Low Quality records
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
Importbot is importing low-quality records from an unspecified source. The books are public-domain reprints (possibly print on demand) with keyword-stuffed titles. 
### Evidence / Screenshot (if possible)
<img width="858" alt="Screenshot 2020-11-24 at 21 31 56" src="https://user-images.githubusercontent.com/17739465/100149057-e87a3b80-2e9d-11eb-9291-b49854aa65b7.png">
Example: https://openlibrary.org/search?q=adventures+of+sherlock+holmes+conan+doyle&mode=everything&page=3
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
Books have valid ISBNs, and human editors should be allowed to add them manually, but these records should be blocked from import by importbot.
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
@mekarpeles
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/partner_batch_imports.py`
Content:
```
1 """
2 Process partner bibliographic csv data into importable json book
3 records and then batch submit into the ImportBot
4 `import_item` table (http://openlibrary.org/admin/imports)
5 which queues items to be imported via the
6 Open Library JSON import API: https://openlibrary.org/api/import
7
8 To Run:
9
10 PYTHONPATH=. python ./scripts/partner_batch_imports.py /olsystem/etc/openlibrary.yml
11 """
12
13 import os
14 import re
15 import sys
16 import web
17 import datetime
18 from datetime import timedelta
19 import logging
20 import requests
21
22 from infogami import config # noqa: F401
23 from openlibrary.config import load_config
24 from openlibrary.core.imports import Batch
25 from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
26
27 logger = logging.getLogger("openlibrary.importer.bwb")
28
29 SCHEMA_URL = (
30 "https://raw.githubusercontent.com/internetarchive"
31 "/openlibrary-client/master/olclient/schemata/import.schema.json"
32 )
33
34
35 class Biblio:
36
37 ACTIVE_FIELDS = [
38 'title',
39 'isbn_13',
40 'publish_date',
41 'publishers',
42 'weight',
43 'authors',
44 'lc_classifications',
45 'pagination',
46 'languages',
47 'subjects',
48 'source_records',
49 ]
50 INACTIVE_FIELDS = [
51 "copyright",
52 "issn",
53 "doi",
54 "lccn",
55 "dewey",
56 "length",
57 "width",
58 "height",
59 ]
60 REQUIRED_FIELDS = requests.get(SCHEMA_URL).json()['required']
61
62 NONBOOK = """A2 AA AB AJ AVI AZ BK BM C3 CD CE CF CR CRM CRW CX D3 DA DD DF DI DL DO DR
63 DRM DRW DS DV EC FC FI FM FR FZ GB GC GM GR H3 H5 L3 L5 LP MAC MC MF MG MH ML MS MSX MZ
64 N64 NGA NGB NGC NGE NT OR OS PC PP PRP PS PSC PY QU RE RV SA SD SG SH SK SL SMD SN SO SO1
65 SO2 SR SU TA TB TR TS TY UX V35 V8 VC VD VE VF VK VM VN VO VP VS VU VY VZ WA WC WI WL WM
66 WP WT WX XL XZ ZF ZZ""".split()
67
68 def __init__(self, data):
69 self.isbn = data[124]
70 self.source_id = 'bwb:%s' % self.isbn
71 self.isbn_13 = [self.isbn]
72 self.title = data[10]
73 self.primary_format = data[6]
74 self.publish_date = data[20][:4] # YYYY, YYYYMMDD
75 self.publishers = [data[135]]
76 self.weight = data[39]
77 self.authors = self.contributors(data)
78 self.lc_classifications = [data[147]] if data[147] else []
79 self.pagination = data[36]
80 self.languages = [data[37].lower()]
81 self.source_records = [self.source_id]
82 self.subjects = [
83 s.capitalize().replace('_', ', ')
84 for s in data[91:100]
85 # + data[101:120]
86 # + data[153:158]
87 if s
88 ]
89
90 # Inactive fields
91 self.copyright = data[19]
92 self.issn = data[54]
93 self.doi = data[145]
94 self.lccn = data[146]
95 self.dewey = data[49]
96 # physical_dimensions
97 # e.g. "5.4 x 4.7 x 0.2 inches"
98 self.length, self.width, self.height = data[40:43]
99
100 # Assert importable
101 for field in self.REQUIRED_FIELDS + ['isbn_13']:
102 assert getattr(self, field), field
103 # This seems to be eliminating books too aggressively
104 #assert self.primary_format not in self.NONBOOK, f"{self.primary_format} is NONBOOK"
105
106 @staticmethod
107 def contributors(data):
108 def make_author(name, _, typ):
109 author = {'name': name}
110 if typ == 'X':
111 # set corporate contributor
112 author['entity_type'] = 'org'
113 # TODO: sort out contributor types
114 # AU = author
115 # ED = editor
116 return author
117
118 contributors = (
119 (data[21 + i * 3], data[22 + i * 3], data[23 + i * 3]) for i in range(5)
120 )
121
122 # form list of author dicts
123 authors = [make_author(*c) for c in contributors if c[0]]
124 return authors
125
126 def json(self):
127 return {
128 field: getattr(self, field)
129 for field in self.ACTIVE_FIELDS
130 if getattr(self, field)
131 }
132
133
134 def load_state(path, logfile):
135 """Retrieves starting point from logfile, if log exists
136
137 Takes as input a path which expands to an ordered candidate list
138 of bettworldbks* filenames to process, the location of the
139 logfile, and determines which of those files are remaining, as
140 well as what our offset is in that file.
141
142 e.g. if we request path containing f1, f2, f3 and our log
143 says f2,100 then we start our processing at f2 at the 100th line.
144
145 This assumes the script is being called w/ e.g.:
146 /1/var/tmp/imports/2021-08/Bibliographic/*/
147 """
148 filenames = sorted(
149 os.path.join(path, f) for f in os.listdir(path) if f.startswith("bettworldbks")
150 )
151 try:
152 with open(logfile) as fin:
153 active_fname, offset = next(fin).strip().split(',')
154 unfinished_filenames = filenames[filenames.index(active_fname) :]
155 return unfinished_filenames, int(offset)
156 except (ValueError, OSError):
157 return filenames, 0
158
159
160 def update_state(logfile, fname, line_num=0):
161 """Records the last file we began processing and the current line"""
162 with open(logfile, 'w') as fout:
163 fout.write(f'{fname},{line_num}\n')
164
165
166 def csv_to_ol_json_item(line):
167 """converts a line to a book item"""
168 try:
169 data = line.decode().strip().split('|')
170 except UnicodeDecodeError:
171 data = line.decode('ISO-8859-1').strip().split('|')
172
173 b = Biblio(data)
174 return {'ia_id': b.source_id, 'data': b.json()}
175
176
177 def batch_import(path, batch, batch_size=5000):
178 logfile = os.path.join(path, 'import.log')
179 filenames, offset = load_state(path, logfile)
180
181 for fname in filenames:
182 book_items = []
183 with open(fname, 'rb') as f:
184 logger.info(f"Processing: {fname} from line {offset}")
185 for line_num, line in enumerate(f):
186
187 # skip over already processed records
188 if offset:
189 if offset > line_num:
190 continue
191 offset = 0
192
193 try:
194 book_items.append(csv_to_ol_json_item(line))
195 except AssertionError as e:
196 logger.info(f"Error: {e} from {line}")
197
198 # If we have enough items, submit a batch
199 if not ((line_num + 1) % batch_size):
200 batch.add_items(book_items)
201 update_state(logfile, fname, line_num)
202 book_items = [] # clear added items
203
204 # Add any remaining book_items to batch
205 if book_items:
206 batch.add_items(book_items)
207 update_state(logfile, fname, line_num)
208
209
210 def main(ol_config: str, batch_path: str):
211 load_config(ol_config)
212
213 # Partner data is offset ~15 days from start of month
214 date = datetime.date.today() - timedelta(days=15)
215 batch_name = "%s-%04d%02d" % ('bwb', date.year, date.month)
216 batch = Batch.find(batch_name) or Batch.new(batch_name)
217 batch_import(batch_path, batch)
218
219
220 if __name__ == '__main__':
221 FnToCLI(main).run()
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/partner_batch_imports.py b/scripts/partner_batch_imports.py
--- a/scripts/partner_batch_imports.py
+++ b/scripts/partner_batch_imports.py
@@ -173,6 +173,10 @@
b = Biblio(data)
return {'ia_id': b.source_id, 'data': b.json()}
+def is_low_quality_book(book_item):
+ """check if a book item is of low quality"""
+ return ("notebook" in book_item.title.casefold() and "independently published" in book_item.publisher.casefold())
+
def batch_import(path, batch, batch_size=5000):
logfile = os.path.join(path, 'import.log')
@@ -191,7 +195,9 @@
offset = 0
try:
- book_items.append(csv_to_ol_json_item(line))
+ book_item = csv_to_ol_json_item(line)
+ if not is_low_quality_book(book_item["data"]):
+ book_items.append(book_item)
except AssertionError as e:
logger.info(f"Error: {e} from {line}")
@@ -206,7 +212,6 @@
batch.add_items(book_items)
update_state(logfile, fname, line_num)
-
def main(ol_config: str, batch_path: str):
load_config(ol_config)
| {"golden_diff": "diff --git a/scripts/partner_batch_imports.py b/scripts/partner_batch_imports.py\n--- a/scripts/partner_batch_imports.py\n+++ b/scripts/partner_batch_imports.py\n@@ -173,6 +173,10 @@\n b = Biblio(data)\n return {'ia_id': b.source_id, 'data': b.json()}\n \n+def is_low_quality_book(book_item):\n+ \"\"\"check if a book item is of low quality\"\"\"\n+ return (\"notebook\" in book_item.title.casefold() and \"independently published\" in book_item.publisher.casefold())\n+\n \n def batch_import(path, batch, batch_size=5000):\n logfile = os.path.join(path, 'import.log')\n@@ -191,7 +195,9 @@\n offset = 0\n \n try:\n- book_items.append(csv_to_ol_json_item(line))\n+ book_item = csv_to_ol_json_item(line)\n+ if not is_low_quality_book(book_item[\"data\"]):\n+ book_items.append(book_item)\n except AssertionError as e:\n logger.info(f\"Error: {e} from {line}\")\n \n@@ -206,7 +212,6 @@\n batch.add_items(book_items)\n update_state(logfile, fname, line_num)\n \n-\n def main(ol_config: str, batch_path: str):\n load_config(ol_config)\n", "issue": "Fix BWB Importbot Low Quality records\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\nImportbot is importing low-quality records from unspecified source. Titles are public domain reprints (possibly print on demand) with keyword-stuffed titles. \r\n\r\n### Evidence / Screenshot (if possible)\r\n<img width=\"858\" alt=\"Screenshot 2020-11-24 at 21 31 56\" src=\"https://user-images.githubusercontent.com/17739465/100149057-e87a3b80-2e9d-11eb-9291-b49854aa65b7.png\">\r\n\r\nExample: https://openlibrary.org/search?q=adventures+of+sherlock+holmes+conan+doyle&mode=everything&page=3\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\nBooks have valid ISBNs and human editors should be allowed to add manually but these should be blocked from import by importbot.\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@mekarpeles \n", "before_files": [{"content": "\"\"\"\nProcess partner bibliographic csv data into importable json book\nrecords and then batch submit into the ImportBot\n`import_item` table (http://openlibrary.org/admin/imports)\nwhich queues items to be imported via the\nOpen Library JSON import API: https://openlibrary.org/api/import\n\nTo Run:\n\nPYTHONPATH=. 
python ./scripts/partner_batch_imports.py /olsystem/etc/openlibrary.yml\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport web\nimport datetime\nfrom datetime import timedelta\nimport logging\nimport requests\n\nfrom infogami import config # noqa: F401\nfrom openlibrary.config import load_config\nfrom openlibrary.core.imports import Batch\nfrom scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI\n\nlogger = logging.getLogger(\"openlibrary.importer.bwb\")\n\nSCHEMA_URL = (\n \"https://raw.githubusercontent.com/internetarchive\"\n \"/openlibrary-client/master/olclient/schemata/import.schema.json\"\n)\n\n\nclass Biblio:\n\n ACTIVE_FIELDS = [\n 'title',\n 'isbn_13',\n 'publish_date',\n 'publishers',\n 'weight',\n 'authors',\n 'lc_classifications',\n 'pagination',\n 'languages',\n 'subjects',\n 'source_records',\n ]\n INACTIVE_FIELDS = [\n \"copyright\",\n \"issn\",\n \"doi\",\n \"lccn\",\n \"dewey\",\n \"length\",\n \"width\",\n \"height\",\n ]\n REQUIRED_FIELDS = requests.get(SCHEMA_URL).json()['required']\n\n NONBOOK = \"\"\"A2 AA AB AJ AVI AZ BK BM C3 CD CE CF CR CRM CRW CX D3 DA DD DF DI DL DO DR\n DRM DRW DS DV EC FC FI FM FR FZ GB GC GM GR H3 H5 L3 L5 LP MAC MC MF MG MH ML MS MSX MZ\n N64 NGA NGB NGC NGE NT OR OS PC PP PRP PS PSC PY QU RE RV SA SD SG SH SK SL SMD SN SO SO1\n SO2 SR SU TA TB TR TS TY UX V35 V8 VC VD VE VF VK VM VN VO VP VS VU VY VZ WA WC WI WL WM\n WP WT WX XL XZ ZF ZZ\"\"\".split()\n\n def __init__(self, data):\n self.isbn = data[124]\n self.source_id = 'bwb:%s' % self.isbn\n self.isbn_13 = [self.isbn]\n self.title = data[10]\n self.primary_format = data[6]\n self.publish_date = data[20][:4] # YYYY, YYYYMMDD\n self.publishers = [data[135]]\n self.weight = data[39]\n self.authors = self.contributors(data)\n self.lc_classifications = [data[147]] if data[147] else []\n self.pagination = data[36]\n self.languages = [data[37].lower()]\n self.source_records = [self.source_id]\n self.subjects = [\n s.capitalize().replace('_', ', ')\n for s in data[91:100]\n # + data[101:120]\n # + data[153:158]\n if s\n ]\n\n # Inactive fields\n self.copyright = data[19]\n self.issn = data[54]\n self.doi = data[145]\n self.lccn = data[146]\n self.dewey = data[49]\n # physical_dimensions\n # e.g. \"5.4 x 4.7 x 0.2 inches\"\n self.length, self.width, self.height = data[40:43]\n\n # Assert importable\n for field in self.REQUIRED_FIELDS + ['isbn_13']:\n assert getattr(self, field), field\n # This seems to be eliminating books too aggressively\n #assert self.primary_format not in self.NONBOOK, f\"{self.primary_format} is NONBOOK\"\n\n @staticmethod\n def contributors(data):\n def make_author(name, _, typ):\n author = {'name': name}\n if typ == 'X':\n # set corporate contributor\n author['entity_type'] = 'org'\n # TODO: sort out contributor types\n # AU = author\n # ED = editor\n return author\n\n contributors = (\n (data[21 + i * 3], data[22 + i * 3], data[23 + i * 3]) for i in range(5)\n )\n\n # form list of author dicts\n authors = [make_author(*c) for c in contributors if c[0]]\n return authors\n\n def json(self):\n return {\n field: getattr(self, field)\n for field in self.ACTIVE_FIELDS\n if getattr(self, field)\n }\n\n\ndef load_state(path, logfile):\n \"\"\"Retrieves starting point from logfile, if log exists\n\n Takes as input a path which expands to an ordered candidate list\n of bettworldbks* filenames to process, the location of the\n logfile, and determines which of those files are remaining, as\n well as what our offset is in that file.\n\n e.g. 
if we request path containing f1, f2, f3 and our log\n says f2,100 then we start our processing at f2 at the 100th line.\n\n This assumes the script is being called w/ e.g.:\n /1/var/tmp/imports/2021-08/Bibliographic/*/\n \"\"\"\n filenames = sorted(\n os.path.join(path, f) for f in os.listdir(path) if f.startswith(\"bettworldbks\")\n )\n try:\n with open(logfile) as fin:\n active_fname, offset = next(fin).strip().split(',')\n unfinished_filenames = filenames[filenames.index(active_fname) :]\n return unfinished_filenames, int(offset)\n except (ValueError, OSError):\n return filenames, 0\n\n\ndef update_state(logfile, fname, line_num=0):\n \"\"\"Records the last file we began processing and the current line\"\"\"\n with open(logfile, 'w') as fout:\n fout.write(f'{fname},{line_num}\\n')\n\n\ndef csv_to_ol_json_item(line):\n \"\"\"converts a line to a book item\"\"\"\n try:\n data = line.decode().strip().split('|')\n except UnicodeDecodeError:\n data = line.decode('ISO-8859-1').strip().split('|')\n\n b = Biblio(data)\n return {'ia_id': b.source_id, 'data': b.json()}\n\n\ndef batch_import(path, batch, batch_size=5000):\n logfile = os.path.join(path, 'import.log')\n filenames, offset = load_state(path, logfile)\n\n for fname in filenames:\n book_items = []\n with open(fname, 'rb') as f:\n logger.info(f\"Processing: {fname} from line {offset}\")\n for line_num, line in enumerate(f):\n\n # skip over already processed records\n if offset:\n if offset > line_num:\n continue\n offset = 0\n\n try:\n book_items.append(csv_to_ol_json_item(line))\n except AssertionError as e:\n logger.info(f\"Error: {e} from {line}\")\n\n # If we have enough items, submit a batch\n if not ((line_num + 1) % batch_size):\n batch.add_items(book_items)\n update_state(logfile, fname, line_num)\n book_items = [] # clear added items\n\n # Add any remaining book_items to batch\n if book_items:\n batch.add_items(book_items)\n update_state(logfile, fname, line_num)\n\n\ndef main(ol_config: str, batch_path: str):\n load_config(ol_config)\n\n # Partner data is offset ~15 days from start of month\n date = datetime.date.today() - timedelta(days=15)\n batch_name = \"%s-%04d%02d\" % ('bwb', date.year, date.month)\n batch = Batch.find(batch_name) or Batch.new(batch_name)\n batch_import(batch_path, batch)\n\n\nif __name__ == '__main__':\n FnToCLI(main).run()\n", "path": "scripts/partner_batch_imports.py"}], "after_files": [{"content": "\"\"\"\nProcess partner bibliographic csv data into importable json book\nrecords and then batch submit into the ImportBot\n`import_item` table (http://openlibrary.org/admin/imports)\nwhich queues items to be imported via the\nOpen Library JSON import API: https://openlibrary.org/api/import\n\nTo Run:\n\nPYTHONPATH=. 
python ./scripts/partner_batch_imports.py /olsystem/etc/openlibrary.yml\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport web\nimport datetime\nfrom datetime import timedelta\nimport logging\nimport requests\n\nfrom infogami import config # noqa: F401\nfrom openlibrary.config import load_config\nfrom openlibrary.core.imports import Batch\nfrom scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI\n\nlogger = logging.getLogger(\"openlibrary.importer.bwb\")\n\nSCHEMA_URL = (\n \"https://raw.githubusercontent.com/internetarchive\"\n \"/openlibrary-client/master/olclient/schemata/import.schema.json\"\n)\n\n\nclass Biblio:\n\n ACTIVE_FIELDS = [\n 'title',\n 'isbn_13',\n 'publish_date',\n 'publishers',\n 'weight',\n 'authors',\n 'lc_classifications',\n 'pagination',\n 'languages',\n 'subjects',\n 'source_records',\n ]\n INACTIVE_FIELDS = [\n \"copyright\",\n \"issn\",\n \"doi\",\n \"lccn\",\n \"dewey\",\n \"length\",\n \"width\",\n \"height\",\n ]\n REQUIRED_FIELDS = requests.get(SCHEMA_URL).json()['required']\n\n NONBOOK = \"\"\"A2 AA AB AJ AVI AZ BK BM C3 CD CE CF CR CRM CRW CX D3 DA DD DF DI DL DO DR\n DRM DRW DS DV EC FC FI FM FR FZ GB GC GM GR H3 H5 L3 L5 LP MAC MC MF MG MH ML MS MSX MZ\n N64 NGA NGB NGC NGE NT OR OS PC PP PRP PS PSC PY QU RE RV SA SD SG SH SK SL SMD SN SO SO1\n SO2 SR SU TA TB TR TS TY UX V35 V8 VC VD VE VF VK VM VN VO VP VS VU VY VZ WA WC WI WL WM\n WP WT WX XL XZ ZF ZZ\"\"\".split()\n\n def __init__(self, data):\n self.isbn = data[124]\n self.source_id = 'bwb:%s' % self.isbn\n self.isbn_13 = [self.isbn]\n self.title = data[10]\n self.primary_format = data[6]\n self.publish_date = data[20][:4] # YYYY, YYYYMMDD\n self.publishers = [data[135]]\n self.weight = data[39]\n self.authors = self.contributors(data)\n self.lc_classifications = [data[147]] if data[147] else []\n self.pagination = data[36]\n self.languages = [data[37].lower()]\n self.source_records = [self.source_id]\n self.subjects = [\n s.capitalize().replace('_', ', ')\n for s in data[91:100]\n # + data[101:120]\n # + data[153:158]\n if s\n ]\n\n # Inactive fields\n self.copyright = data[19]\n self.issn = data[54]\n self.doi = data[145]\n self.lccn = data[146]\n self.dewey = data[49]\n # physical_dimensions\n # e.g. \"5.4 x 4.7 x 0.2 inches\"\n self.length, self.width, self.height = data[40:43]\n\n # Assert importable\n for field in self.REQUIRED_FIELDS + ['isbn_13']:\n assert getattr(self, field), field\n # This seems to be eliminating books too aggressively\n #assert self.primary_format not in self.NONBOOK, f\"{self.primary_format} is NONBOOK\"\n\n @staticmethod\n def contributors(data):\n def make_author(name, _, typ):\n author = {'name': name}\n if typ == 'X':\n # set corporate contributor\n author['entity_type'] = 'org'\n # TODO: sort out contributor types\n # AU = author\n # ED = editor\n return author\n\n contributors = (\n (data[21 + i * 3], data[22 + i * 3], data[23 + i * 3]) for i in range(5)\n )\n\n # form list of author dicts\n authors = [make_author(*c) for c in contributors if c[0]]\n return authors\n\n def json(self):\n return {\n field: getattr(self, field)\n for field in self.ACTIVE_FIELDS\n if getattr(self, field)\n }\n\n\ndef load_state(path, logfile):\n \"\"\"Retrieves starting point from logfile, if log exists\n\n Takes as input a path which expands to an ordered candidate list\n of bettworldbks* filenames to process, the location of the\n logfile, and determines which of those files are remaining, as\n well as what our offset is in that file.\n\n e.g. 
if we request path containing f1, f2, f3 and our log\n says f2,100 then we start our processing at f2 at the 100th line.\n\n This assumes the script is being called w/ e.g.:\n /1/var/tmp/imports/2021-08/Bibliographic/*/\n \"\"\"\n filenames = sorted(\n os.path.join(path, f) for f in os.listdir(path) if f.startswith(\"bettworldbks\")\n )\n try:\n with open(logfile) as fin:\n active_fname, offset = next(fin).strip().split(',')\n unfinished_filenames = filenames[filenames.index(active_fname) :]\n return unfinished_filenames, int(offset)\n except (ValueError, OSError):\n return filenames, 0\n\n\ndef update_state(logfile, fname, line_num=0):\n \"\"\"Records the last file we began processing and the current line\"\"\"\n with open(logfile, 'w') as fout:\n fout.write(f'{fname},{line_num}\\n')\n\n\ndef csv_to_ol_json_item(line):\n \"\"\"converts a line to a book item\"\"\"\n try:\n data = line.decode().strip().split('|')\n except UnicodeDecodeError:\n data = line.decode('ISO-8859-1').strip().split('|')\n\n b = Biblio(data)\n return {'ia_id': b.source_id, 'data': b.json()}\n\ndef is_low_quality_book(book_item):\n \"\"\"check if a book item is of low quality\"\"\"\n return (\"notebook\" in book_item.title.casefold() and \"independently published\" in book_item.publisher.casefold())\n\n\ndef batch_import(path, batch, batch_size=5000):\n logfile = os.path.join(path, 'import.log')\n filenames, offset = load_state(path, logfile)\n\n for fname in filenames:\n book_items = []\n with open(fname, 'rb') as f:\n logger.info(f\"Processing: {fname} from line {offset}\")\n for line_num, line in enumerate(f):\n\n # skip over already processed records\n if offset:\n if offset > line_num:\n continue\n offset = 0\n\n try:\n book_item = csv_to_ol_json_item(line)\n if not is_low_quality_book(book_item[\"data\"]):\n book_items.append(book_item)\n except AssertionError as e:\n logger.info(f\"Error: {e} from {line}\")\n\n # If we have enough items, submit a batch\n if not ((line_num + 1) % batch_size):\n batch.add_items(book_items)\n update_state(logfile, fname, line_num)\n book_items = [] # clear added items\n\n # Add any remaining book_items to batch\n if book_items:\n batch.add_items(book_items)\n update_state(logfile, fname, line_num)\n\ndef main(ol_config: str, batch_path: str):\n load_config(ol_config)\n\n # Partner data is offset ~15 days from start of month\n date = datetime.date.today() - timedelta(days=15)\n batch_name = \"%s-%04d%02d\" % ('bwb', date.year, date.month)\n batch = Batch.find(batch_name) or Batch.new(batch_name)\n batch_import(batch_path, batch)\n\n\nif __name__ == '__main__':\n FnToCLI(main).run()\n", "path": "scripts/partner_batch_imports.py"}]} | 2,973 | 304 |
gh_patches_debug_32322 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1365 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Number scale not persisting
## Reproduce
1. In the front end, edit the DB options for a Number column.
1. Set "Number Type" to "Decimal". Set "Decimal Places" to "3". Click Save.
1. Observe a PATCH request to the columns API which specifies `type_options.scale` to `2`. Good.
1. In the response of this request (and also in the response of the subsequent GET request to the columns endpoint), expect the column to show `type_options` with an object containing a `scale` of `2`.
1. Instead, observe `type_options` to be `null`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/columns.py`
Content:
```
1 from rest_framework import serializers
2 from rest_framework.exceptions import ValidationError
3 from rest_framework.fields import empty
4 from rest_framework.settings import api_settings
5
6 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
7 from mathesar.api.serializers.shared_serializers import (
8 DisplayOptionsMappingSerializer,
9 DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY,
10 )
11 from mathesar.models import Column
12
13
14 class InputValueField(serializers.CharField):
15 """
16 Takes in an arbitrary value. Emulates the record creation endpoint,
17 which takes in arbitrary values (un-validated and un-processed request.data).
18 This field replicates that behavior in a serializer.
19 """
20
21 def to_internal_value(self, data):
22 return data
23
24 def to_representation(self, value):
25 return value
26
27
28 class TypeOptionSerializer(MathesarErrorMessageMixin, serializers.Serializer):
29 length = serializers.IntegerField(required=False)
30 precision = serializers.IntegerField(required=False)
31 scale = serializers.IntegerField(required=False)
32 fields = serializers.CharField(required=False)
33
34 def run_validation(self, data=empty):
35 # Ensure that there are no unknown type options passed in.
36 if data is not empty and data is not None:
37 unknown = set(data) - set(self.fields)
38 if unknown:
39 errors = ['Unknown field: {}'.format(field) for field in unknown]
40 raise serializers.ValidationError({
41 api_settings.NON_FIELD_ERRORS_KEY: errors,
42 })
43
44 return super(TypeOptionSerializer, self).run_validation(data)
45
46
47 class SimpleColumnSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
48 class Meta:
49 model = Column
50 fields = ('id',
51 'name',
52 'type',
53 'type_options',
54 'display_options'
55 )
56 name = serializers.CharField()
57 type = serializers.CharField(source='plain_type')
58 type_options = TypeOptionSerializer(required=False, allow_null=True)
59 display_options = DisplayOptionsMappingSerializer(required=False, allow_null=True)
60
61 def to_representation(self, instance):
62 if isinstance(instance, dict):
63 instance_type = instance.get('type')
64 else:
65 instance_type = instance.plain_type
66 self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)
67 return super().to_representation(instance)
68
69 def to_internal_value(self, data):
70 if self.partial and 'type' not in data:
71 instance_type = getattr(self.instance, 'plain_type', None)
72 if instance_type is not None:
73 self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)
74 else:
75 self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = data.get('type', None)
76 return super().to_internal_value(data)
77
78
79 class ColumnDefaultSerializer(MathesarErrorMessageMixin, serializers.Serializer):
80 value = InputValueField()
81 is_dynamic = serializers.BooleanField(read_only=True)
82
83
84 class ColumnSerializer(SimpleColumnSerializer):
85 class Meta(SimpleColumnSerializer.Meta):
86 fields = SimpleColumnSerializer.Meta.fields + (
87 'nullable',
88 'primary_key',
89 'source_column',
90 'copy_source_data',
91 'copy_source_constraints',
92 'valid_target_types',
93 'default'
94 )
95 model_fields = ('display_options',)
96
97 name = serializers.CharField(required=False, allow_blank=True)
98
99 # From scratch fields
100 type = serializers.CharField(source='plain_type', required=False)
101 nullable = serializers.BooleanField(default=True)
102 primary_key = serializers.BooleanField(default=False)
103 default = ColumnDefaultSerializer(
104 source='column_default_dict', required=False, allow_null=True, default=None
105 )
106
107 # From duplication fields
108 source_column = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), required=False, write_only=True)
109 copy_source_data = serializers.BooleanField(default=True, write_only=True)
110 copy_source_constraints = serializers.BooleanField(default=True, write_only=True)
111
112 # Read only fields
113 valid_target_types = serializers.ListField(read_only=True)
114
115 def validate(self, data):
116 data = super().validate(data)
117 # Reevaluate column display options based on the new column type.
118 if 'plain_type' in data and 'display_options' not in data:
119 if self.instance:
120 instance_type = getattr(self.instance, 'plain_type', None)
121 # Invalidate display_options if type has been changed
122 if str(instance_type) != data['plain_type']:
123 data['display_options'] = None
124 else:
125 data['display_options'] = None
126 if not self.partial:
127 from_scratch_required_fields = ['type']
128 from_scratch_specific_fields = ['type', 'nullable', 'primary_key']
129 from_dupe_required_fields = ['source_column']
130 from_dupe_specific_fields = ['source_column', 'copy_source_data',
131 'copy_source_constraints']
132
133 # Note that we run validation on self.initial_data, as `data` has defaults
134 # filled in for fields that weren't specified by the request
135 from_scratch_required_all = all([
136 f in self.initial_data for f in from_scratch_required_fields
137 ])
138 from_scratch_specific_in = [
139 f for f in from_scratch_specific_fields if f in self.initial_data
140 ]
141 from_dupe_required_all = all([
142 f in self.initial_data for f in from_dupe_required_fields
143 ])
144 from_dupe_specific_in = [
145 f for f in from_dupe_specific_fields if f in self.initial_data
146 ]
147
148 if len(from_dupe_specific_in) and len(from_scratch_specific_in):
149 raise ValidationError(
150 f'{from_scratch_specific_in} cannot be passed in if '
151 f'{from_dupe_specific_in} has also been passed in.'
152 )
153 elif not from_dupe_required_all and not from_scratch_required_all:
154 # We default to from scratch required fields if no fields are passed
155 if len(from_dupe_specific_in) and not len(from_scratch_specific_in):
156 required_fields = from_dupe_required_fields
157 else:
158 required_fields = from_scratch_required_fields
159 raise ValidationError({
160 f: ['This field is required.']
161 for f in required_fields
162 if f not in self.initial_data
163 })
164 return data
165
166 @property
167 def validated_model_fields(self):
168 return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}
169
```
Path: `mathesar/api/db/viewsets/columns.py`
Content:
```
1 import warnings
2 from psycopg2.errors import DuplicateColumn
3 from rest_framework import status, viewsets
4 from rest_framework.exceptions import NotFound
5 from rest_framework.response import Response
6 from sqlalchemy.exc import ProgrammingError
7
8 from mathesar.api.exceptions.database_exceptions import (
9 exceptions as database_api_exceptions,
10 base_exceptions as database_base_api_exceptions,
11 )
12 from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions
13 from db.columns.exceptions import (
14 DynamicDefaultWarning, InvalidDefaultError, InvalidTypeOptionError, InvalidTypeError,
15 )
16 from db.columns.operations.select import get_column_attnum_from_name
17 from db.types.exceptions import InvalidTypeParameters
18 from db.records.exceptions import UndefinedFunction
19 from mathesar.api.pagination import DefaultLimitOffsetPagination
20 from mathesar.api.serializers.columns import ColumnSerializer
21 from mathesar.api.utils import get_table_or_404
22 from mathesar.models import Column
23
24
25 class ColumnViewSet(viewsets.ModelViewSet):
26 serializer_class = ColumnSerializer
27 pagination_class = DefaultLimitOffsetPagination
28
29 def get_queryset(self):
30 return Column.objects.filter(table=self.kwargs['table_pk'])
31
32 def create(self, request, table_pk=None):
33 table = get_table_or_404(table_pk)
34 # We only support adding a single column through the API.
35 serializer = ColumnSerializer(data=request.data, context={'request': request})
36 serializer.is_valid(raise_exception=True)
37
38 if 'source_column' in serializer.validated_data:
39 column = table.duplicate_column(
40 serializer.validated_data['source_column'],
41 serializer.validated_data['copy_source_data'],
42 serializer.validated_data['copy_source_constraints'],
43 serializer.validated_data.get('name'),
44 )
45 else:
46 try:
47 column = table.add_column(request.data)
48 except ProgrammingError as e:
49 if type(e.orig) == DuplicateColumn:
50 name = request.data['name']
51 raise database_api_exceptions.DuplicateTableAPIException(
52 e,
53 message=f'Column {name} already exists',
54 field='name',
55 status_code=status.HTTP_400_BAD_REQUEST
56 )
57 else:
58 raise database_base_api_exceptions.ProgrammingAPIException(e)
59 except TypeError as e:
60 raise base_api_exceptions.TypeErrorAPIException(
61 e,
62 message="Unknown type_option passed",
63 status_code=status.HTTP_400_BAD_REQUEST
64 )
65 except InvalidDefaultError as e:
66 raise database_api_exceptions.InvalidDefaultAPIException(
67 e,
68 message=f'default "{request.data["default"]}" is invalid for type {request.data["type"]}',
69 status_code=status.HTTP_400_BAD_REQUEST
70 )
71 except (InvalidTypeOptionError, InvalidTypeParameters) as e:
72 type_options = request.data.get('type_options', '')
73 raise database_api_exceptions.InvalidTypeOptionAPIException(
74 e,
75 message=f'parameter dict {type_options} is invalid for type {request.data["type"]}',
76 field="type_options",
77 status_code=status.HTTP_400_BAD_REQUEST
78 )
79 except InvalidTypeError as e:
80 raise database_api_exceptions.InvalidTypeCastAPIException(
81 e,
82 message='This type casting is invalid.',
83 status_code=status.HTTP_400_BAD_REQUEST
84 )
85 dj_column = Column(
86 table=table,
87 attnum=get_column_attnum_from_name(table.oid, column.name, table.schema._sa_engine),
88 **serializer.validated_model_fields
89 )
90 dj_column.save()
91 out_serializer = ColumnSerializer(dj_column)
92 return Response(out_serializer.data, status=status.HTTP_201_CREATED)
93
94 def partial_update(self, request, pk=None, table_pk=None):
95 column_instance = self.get_object()
96 table = column_instance.table
97 serializer = ColumnSerializer(instance=column_instance, data=request.data, partial=True)
98 serializer.is_valid(raise_exception=True)
99 with warnings.catch_warnings():
100 warnings.filterwarnings("error", category=DynamicDefaultWarning)
101 try:
102 table.alter_column(column_instance._sa_column.column_attnum, serializer.validated_data)
103 except UndefinedFunction as e:
104 raise database_api_exceptions.UndefinedFunctionAPIException(
105 e,
106 message='This type cast is not implemented',
107 status_code=status.HTTP_400_BAD_REQUEST
108 )
109 except ProgrammingError as e:
110 raise database_base_api_exceptions.ProgrammingAPIException(
111 e,
112 status_code=status.HTTP_400_BAD_REQUEST
113 )
114 except IndexError as e:
115 raise base_api_exceptions.NotFoundAPIException(e)
116 except TypeError as e:
117 raise database_api_exceptions.InvalidTypeOptionAPIException(
118 e,
119 message="Unknown type_option passed",
120 status_code=status.HTTP_400_BAD_REQUEST
121 )
122 except InvalidDefaultError as e:
123 raise database_api_exceptions.InvalidDefaultAPIException(
124 e,
125 message=f'default "{request.data["default"]}" is invalid for this column',
126 status_code=status.HTTP_400_BAD_REQUEST
127 )
128 except DynamicDefaultWarning as e:
129 raise database_api_exceptions.DynamicDefaultAPIException(
130 e,
131 message='Changing type of columns with dynamically-generated defaults is not supported.'
132 'Delete or change the default first.',
133 status_code=status.HTTP_400_BAD_REQUEST
134 )
135 except (InvalidTypeOptionError, InvalidTypeParameters) as e:
136 type_options = request.data.get('type_options', '')
137 raise database_api_exceptions.InvalidTypeOptionAPIException(
138 e,
139 message=f'parameter dict {type_options} is invalid for type {request.data["type"]}',
140 status_code=status.HTTP_400_BAD_REQUEST
141 )
142 except InvalidTypeError as e:
143 raise database_api_exceptions.InvalidTypeCastAPIException(
144 e,
145 message='This type casting is invalid.',
146 status_code=status.HTTP_400_BAD_REQUEST
147 )
148 except Exception as e:
149 raise base_api_exceptions.MathesarAPIException(e)
150
151 serializer.update(column_instance, serializer.validated_model_fields)
152 # Invalidate the cache as the underlying columns have changed
153 column_instance = self.get_object()
154 out_serializer = ColumnSerializer(column_instance)
155 return Response(out_serializer.data)
156
157 def destroy(self, request, pk=None, table_pk=None):
158 column_instance = self.get_object()
159 table = column_instance.table
160 try:
161 table.drop_column(column_instance.attnum)
162 column_instance.delete()
163 except IndexError:
164 raise NotFound
165 return Response(status=status.HTTP_204_NO_CONTENT)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/db/viewsets/columns.py b/mathesar/api/db/viewsets/columns.py
--- a/mathesar/api/db/viewsets/columns.py
+++ b/mathesar/api/db/viewsets/columns.py
@@ -34,7 +34,12 @@
# We only support adding a single column through the API.
serializer = ColumnSerializer(data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
-
+ type_options = request.data.get('type_options', None)
+ if type_options is not None:
+ scale = type_options.get('scale', None)
+ precision = type_options.get('precision', None)
+ if scale is not None and precision is None:
+ request.data['type_options']['precision'] = 1000
if 'source_column' in serializer.validated_data:
column = table.duplicate_column(
serializer.validated_data['source_column'],
@@ -44,6 +49,7 @@
)
else:
try:
+ # TODO Refactor add_column to user serializer validated date instead of request data
column = table.add_column(request.data)
except ProgrammingError as e:
if type(e.orig) == DuplicateColumn:
diff --git a/mathesar/api/serializers/columns.py b/mathesar/api/serializers/columns.py
--- a/mathesar/api/serializers/columns.py
+++ b/mathesar/api/serializers/columns.py
@@ -31,6 +31,11 @@
scale = serializers.IntegerField(required=False)
fields = serializers.CharField(required=False)
+ def validate(self, attrs):
+ if attrs.get('scale', None) is not None and attrs.get('precision', None) is None:
+ attrs['precision'] = 1000
+ return super().validate(attrs)
+
def run_validation(self, data=empty):
# Ensure that there are no unknown type options passed in.
if data is not empty and data is not None:
| {"golden_diff": "diff --git a/mathesar/api/db/viewsets/columns.py b/mathesar/api/db/viewsets/columns.py\n--- a/mathesar/api/db/viewsets/columns.py\n+++ b/mathesar/api/db/viewsets/columns.py\n@@ -34,7 +34,12 @@\n # We only support adding a single column through the API.\n serializer = ColumnSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n-\n+ type_options = request.data.get('type_options', None)\n+ if type_options is not None:\n+ scale = type_options.get('scale', None)\n+ precision = type_options.get('precision', None)\n+ if scale is not None and precision is None:\n+ request.data['type_options']['precision'] = 1000\n if 'source_column' in serializer.validated_data:\n column = table.duplicate_column(\n serializer.validated_data['source_column'],\n@@ -44,6 +49,7 @@\n )\n else:\n try:\n+ # TODO Refactor add_column to user serializer validated date instead of request data\n column = table.add_column(request.data)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateColumn:\ndiff --git a/mathesar/api/serializers/columns.py b/mathesar/api/serializers/columns.py\n--- a/mathesar/api/serializers/columns.py\n+++ b/mathesar/api/serializers/columns.py\n@@ -31,6 +31,11 @@\n scale = serializers.IntegerField(required=False)\n fields = serializers.CharField(required=False)\n \n+ def validate(self, attrs):\n+ if attrs.get('scale', None) is not None and attrs.get('precision', None) is None:\n+ attrs['precision'] = 1000\n+ return super().validate(attrs)\n+\n def run_validation(self, data=empty):\n # Ensure that there are no unknown type options passed in.\n if data is not empty and data is not None:\n", "issue": "Number scale not persisting\n## Reproduce\n\n1. In the front end, edit the DB options for a Number column.\n1. Set \"Number Type\" to \"Decimal\". Set \"Decimal Places\" to \"3\". Click Save.\n1. Observe a PATCH request to the columns API which specifies `type_options.scale` to `2`. Good.\n1. In the response of this request (and also in the response of the subsequent GET request to the columns endpoint), expect the column to show `type_options` with an object containing a `scale` of `2`.\n1. Instead, observe `type_options` to be `null`.\n\n", "before_files": [{"content": "from rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import empty\nfrom rest_framework.settings import api_settings\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.shared_serializers import (\n DisplayOptionsMappingSerializer,\n DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY,\n)\nfrom mathesar.models import Column\n\n\nclass InputValueField(serializers.CharField):\n \"\"\"\n Takes in an arbitrary value. 
Emulates the record creation endpoint,\n which takes in arbitrary values (un-validated and un-processed request.data).\n This field replicates that behavior in a serializer.\n \"\"\"\n\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass TypeOptionSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n length = serializers.IntegerField(required=False)\n precision = serializers.IntegerField(required=False)\n scale = serializers.IntegerField(required=False)\n fields = serializers.CharField(required=False)\n\n def run_validation(self, data=empty):\n # Ensure that there are no unknown type options passed in.\n if data is not empty and data is not None:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(field) for field in unknown]\n raise serializers.ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: errors,\n })\n\n return super(TypeOptionSerializer, self).run_validation(data)\n\n\nclass SimpleColumnSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = Column\n fields = ('id',\n 'name',\n 'type',\n 'type_options',\n 'display_options'\n )\n name = serializers.CharField()\n type = serializers.CharField(source='plain_type')\n type_options = TypeOptionSerializer(required=False, allow_null=True)\n display_options = DisplayOptionsMappingSerializer(required=False, allow_null=True)\n\n def to_representation(self, instance):\n if isinstance(instance, dict):\n instance_type = instance.get('type')\n else:\n instance_type = instance.plain_type\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)\n return super().to_representation(instance)\n\n def to_internal_value(self, data):\n if self.partial and 'type' not in data:\n instance_type = getattr(self.instance, 'plain_type', None)\n if instance_type is not None:\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)\n else:\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = data.get('type', None)\n return super().to_internal_value(data)\n\n\nclass ColumnDefaultSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n value = InputValueField()\n is_dynamic = serializers.BooleanField(read_only=True)\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n class Meta(SimpleColumnSerializer.Meta):\n fields = SimpleColumnSerializer.Meta.fields + (\n 'nullable',\n 'primary_key',\n 'source_column',\n 'copy_source_data',\n 'copy_source_constraints',\n 'valid_target_types',\n 'default'\n )\n model_fields = ('display_options',)\n\n name = serializers.CharField(required=False, allow_blank=True)\n\n # From scratch fields\n type = serializers.CharField(source='plain_type', required=False)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n default = ColumnDefaultSerializer(\n source='column_default_dict', required=False, allow_null=True, default=None\n )\n\n # From duplication fields\n source_column = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), required=False, write_only=True)\n copy_source_data = serializers.BooleanField(default=True, write_only=True)\n copy_source_constraints = serializers.BooleanField(default=True, write_only=True)\n\n # Read only fields\n valid_target_types = serializers.ListField(read_only=True)\n\n def validate(self, data):\n data = super().validate(data)\n # Reevaluate column display options based on the new column type.\n if 'plain_type' in data and 
'display_options' not in data:\n if self.instance:\n instance_type = getattr(self.instance, 'plain_type', None)\n # Invalidate display_options if type has been changed\n if str(instance_type) != data['plain_type']:\n data['display_options'] = None\n else:\n data['display_options'] = None\n if not self.partial:\n from_scratch_required_fields = ['type']\n from_scratch_specific_fields = ['type', 'nullable', 'primary_key']\n from_dupe_required_fields = ['source_column']\n from_dupe_specific_fields = ['source_column', 'copy_source_data',\n 'copy_source_constraints']\n\n # Note that we run validation on self.initial_data, as `data` has defaults\n # filled in for fields that weren't specified by the request\n from_scratch_required_all = all([\n f in self.initial_data for f in from_scratch_required_fields\n ])\n from_scratch_specific_in = [\n f for f in from_scratch_specific_fields if f in self.initial_data\n ]\n from_dupe_required_all = all([\n f in self.initial_data for f in from_dupe_required_fields\n ])\n from_dupe_specific_in = [\n f for f in from_dupe_specific_fields if f in self.initial_data\n ]\n\n if len(from_dupe_specific_in) and len(from_scratch_specific_in):\n raise ValidationError(\n f'{from_scratch_specific_in} cannot be passed in if '\n f'{from_dupe_specific_in} has also been passed in.'\n )\n elif not from_dupe_required_all and not from_scratch_required_all:\n # We default to from scratch required fields if no fields are passed\n if len(from_dupe_specific_in) and not len(from_scratch_specific_in):\n required_fields = from_dupe_required_fields\n else:\n required_fields = from_scratch_required_fields\n raise ValidationError({\n f: ['This field is required.']\n for f in required_fields\n if f not in self.initial_data\n })\n return data\n\n @property\n def validated_model_fields(self):\n return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}\n", "path": "mathesar/api/serializers/columns.py"}, {"content": "import warnings\nfrom psycopg2.errors import DuplicateColumn\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions,\n base_exceptions as database_base_api_exceptions,\n)\nfrom mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\nfrom db.columns.exceptions import (\n DynamicDefaultWarning, InvalidDefaultError, InvalidTypeOptionError, InvalidTypeError,\n)\nfrom db.columns.operations.select import get_column_attnum_from_name\nfrom db.types.exceptions import InvalidTypeParameters\nfrom db.records.exceptions import UndefinedFunction\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.columns import ColumnSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.models import Column\n\n\nclass ColumnViewSet(viewsets.ModelViewSet):\n serializer_class = ColumnSerializer\n pagination_class = DefaultLimitOffsetPagination\n\n def get_queryset(self):\n return Column.objects.filter(table=self.kwargs['table_pk'])\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n # We only support adding a single column through the API.\n serializer = ColumnSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n\n if 'source_column' in 
serializer.validated_data:\n column = table.duplicate_column(\n serializer.validated_data['source_column'],\n serializer.validated_data['copy_source_data'],\n serializer.validated_data['copy_source_constraints'],\n serializer.validated_data.get('name'),\n )\n else:\n try:\n column = table.add_column(request.data)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateColumn:\n name = request.data['name']\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message=f'Column {name} already exists',\n field='name',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n else:\n raise database_base_api_exceptions.ProgrammingAPIException(e)\n except TypeError as e:\n raise base_api_exceptions.TypeErrorAPIException(\n e,\n message=\"Unknown type_option passed\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidDefaultError as e:\n raise database_api_exceptions.InvalidDefaultAPIException(\n e,\n message=f'default \"{request.data[\"default\"]}\" is invalid for type {request.data[\"type\"]}',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (InvalidTypeOptionError, InvalidTypeParameters) as e:\n type_options = request.data.get('type_options', '')\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=f'parameter dict {type_options} is invalid for type {request.data[\"type\"]}',\n field=\"type_options\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidTypeError as e:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n e,\n message='This type casting is invalid.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n dj_column = Column(\n table=table,\n attnum=get_column_attnum_from_name(table.oid, column.name, table.schema._sa_engine),\n **serializer.validated_model_fields\n )\n dj_column.save()\n out_serializer = ColumnSerializer(dj_column)\n return Response(out_serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n column_instance = self.get_object()\n table = column_instance.table\n serializer = ColumnSerializer(instance=column_instance, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"error\", category=DynamicDefaultWarning)\n try:\n table.alter_column(column_instance._sa_column.column_attnum, serializer.validated_data)\n except UndefinedFunction as e:\n raise database_api_exceptions.UndefinedFunctionAPIException(\n e,\n message='This type cast is not implemented',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except ProgrammingError as e:\n raise database_base_api_exceptions.ProgrammingAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except IndexError as e:\n raise base_api_exceptions.NotFoundAPIException(e)\n except TypeError as e:\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=\"Unknown type_option passed\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidDefaultError as e:\n raise database_api_exceptions.InvalidDefaultAPIException(\n e,\n message=f'default \"{request.data[\"default\"]}\" is invalid for this column',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except DynamicDefaultWarning as e:\n raise database_api_exceptions.DynamicDefaultAPIException(\n e,\n message='Changing type of columns with dynamically-generated defaults is not supported.'\n 'Delete or change the default first.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (InvalidTypeOptionError, InvalidTypeParameters) as e:\n 
type_options = request.data.get('type_options', '')\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=f'parameter dict {type_options} is invalid for type {request.data[\"type\"]}',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidTypeError as e:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n e,\n message='This type casting is invalid.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except Exception as e:\n raise base_api_exceptions.MathesarAPIException(e)\n\n serializer.update(column_instance, serializer.validated_model_fields)\n # Invalidate the cache as the underlying columns have changed\n column_instance = self.get_object()\n out_serializer = ColumnSerializer(column_instance)\n return Response(out_serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n column_instance = self.get_object()\n table = column_instance.table\n try:\n table.drop_column(column_instance.attnum)\n column_instance.delete()\n except IndexError:\n raise NotFound\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "path": "mathesar/api/db/viewsets/columns.py"}], "after_files": [{"content": "from rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import empty\nfrom rest_framework.settings import api_settings\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.shared_serializers import (\n DisplayOptionsMappingSerializer,\n DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY,\n)\nfrom mathesar.models import Column\n\n\nclass InputValueField(serializers.CharField):\n \"\"\"\n Takes in an arbitrary value. Emulates the record creation endpoint,\n which takes in arbitrary values (un-validated and un-processed request.data).\n This field replicates that behavior in a serializer.\n \"\"\"\n\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass TypeOptionSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n length = serializers.IntegerField(required=False)\n precision = serializers.IntegerField(required=False)\n scale = serializers.IntegerField(required=False)\n fields = serializers.CharField(required=False)\n\n def validate(self, attrs):\n if attrs.get('scale', None) is not None and attrs.get('precision', None) is None:\n attrs['precision'] = 1000\n return super().validate(attrs)\n\n def run_validation(self, data=empty):\n # Ensure that there are no unknown type options passed in.\n if data is not empty and data is not None:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(field) for field in unknown]\n raise serializers.ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: errors,\n })\n\n return super(TypeOptionSerializer, self).run_validation(data)\n\n\nclass SimpleColumnSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = Column\n fields = ('id',\n 'name',\n 'type',\n 'type_options',\n 'display_options'\n )\n name = serializers.CharField()\n type = serializers.CharField(source='plain_type')\n type_options = TypeOptionSerializer(required=False, allow_null=True)\n display_options = DisplayOptionsMappingSerializer(required=False, allow_null=True)\n\n def to_representation(self, instance):\n if isinstance(instance, dict):\n instance_type = instance.get('type')\n else:\n instance_type = instance.plain_type\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)\n 
return super().to_representation(instance)\n\n def to_internal_value(self, data):\n if self.partial and 'type' not in data:\n instance_type = getattr(self.instance, 'plain_type', None)\n if instance_type is not None:\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)\n else:\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = data.get('type', None)\n return super().to_internal_value(data)\n\n\nclass ColumnDefaultSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n value = InputValueField()\n is_dynamic = serializers.BooleanField(read_only=True)\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n class Meta(SimpleColumnSerializer.Meta):\n fields = SimpleColumnSerializer.Meta.fields + (\n 'nullable',\n 'primary_key',\n 'source_column',\n 'copy_source_data',\n 'copy_source_constraints',\n 'valid_target_types',\n 'default'\n )\n model_fields = ('display_options',)\n\n name = serializers.CharField(required=False, allow_blank=True)\n\n # From scratch fields\n type = serializers.CharField(source='plain_type', required=False)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n default = ColumnDefaultSerializer(\n source='column_default_dict', required=False, allow_null=True, default=None\n )\n\n # From duplication fields\n source_column = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), required=False, write_only=True)\n copy_source_data = serializers.BooleanField(default=True, write_only=True)\n copy_source_constraints = serializers.BooleanField(default=True, write_only=True)\n\n # Read only fields\n valid_target_types = serializers.ListField(read_only=True)\n\n def validate(self, data):\n data = super().validate(data)\n # Reevaluate column display options based on the new column type.\n if 'plain_type' in data and 'display_options' not in data:\n if self.instance:\n instance_type = getattr(self.instance, 'plain_type', None)\n # Invalidate display_options if type has been changed\n if str(instance_type) != data['plain_type']:\n data['display_options'] = None\n else:\n data['display_options'] = None\n if not self.partial:\n from_scratch_required_fields = ['type']\n from_scratch_specific_fields = ['type', 'nullable', 'primary_key']\n from_dupe_required_fields = ['source_column']\n from_dupe_specific_fields = ['source_column', 'copy_source_data',\n 'copy_source_constraints']\n\n # Note that we run validation on self.initial_data, as `data` has defaults\n # filled in for fields that weren't specified by the request\n from_scratch_required_all = all([\n f in self.initial_data for f in from_scratch_required_fields\n ])\n from_scratch_specific_in = [\n f for f in from_scratch_specific_fields if f in self.initial_data\n ]\n from_dupe_required_all = all([\n f in self.initial_data for f in from_dupe_required_fields\n ])\n from_dupe_specific_in = [\n f for f in from_dupe_specific_fields if f in self.initial_data\n ]\n\n if len(from_dupe_specific_in) and len(from_scratch_specific_in):\n raise ValidationError(\n f'{from_scratch_specific_in} cannot be passed in if '\n f'{from_dupe_specific_in} has also been passed in.'\n )\n elif not from_dupe_required_all and not from_scratch_required_all:\n # We default to from scratch required fields if no fields are passed\n if len(from_dupe_specific_in) and not len(from_scratch_specific_in):\n required_fields = from_dupe_required_fields\n else:\n required_fields = from_scratch_required_fields\n raise ValidationError({\n f: ['This field is 
required.']\n for f in required_fields\n if f not in self.initial_data\n })\n return data\n\n @property\n def validated_model_fields(self):\n return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}\n", "path": "mathesar/api/serializers/columns.py"}, {"content": "import warnings\nfrom psycopg2.errors import DuplicateColumn\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions,\n base_exceptions as database_base_api_exceptions,\n)\nfrom mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\nfrom db.columns.exceptions import (\n DynamicDefaultWarning, InvalidDefaultError, InvalidTypeOptionError, InvalidTypeError,\n)\nfrom db.columns.operations.select import get_column_attnum_from_name\nfrom db.types.exceptions import InvalidTypeParameters\nfrom db.records.exceptions import UndefinedFunction\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.columns import ColumnSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.models import Column\n\n\nclass ColumnViewSet(viewsets.ModelViewSet):\n serializer_class = ColumnSerializer\n pagination_class = DefaultLimitOffsetPagination\n\n def get_queryset(self):\n return Column.objects.filter(table=self.kwargs['table_pk'])\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n # We only support adding a single column through the API.\n serializer = ColumnSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n type_options = request.data.get('type_options', None)\n if type_options is not None:\n scale = type_options.get('scale', None)\n precision = type_options.get('precision', None)\n if scale is not None and precision is None:\n request.data['type_options']['precision'] = 1000\n if 'source_column' in serializer.validated_data:\n column = table.duplicate_column(\n serializer.validated_data['source_column'],\n serializer.validated_data['copy_source_data'],\n serializer.validated_data['copy_source_constraints'],\n serializer.validated_data.get('name'),\n )\n else:\n try:\n # TODO Refactor add_column to user serializer validated date instead of request data\n column = table.add_column(request.data)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateColumn:\n name = request.data['name']\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message=f'Column {name} already exists',\n field='name',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n else:\n raise database_base_api_exceptions.ProgrammingAPIException(e)\n except TypeError as e:\n raise base_api_exceptions.TypeErrorAPIException(\n e,\n message=\"Unknown type_option passed\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidDefaultError as e:\n raise database_api_exceptions.InvalidDefaultAPIException(\n e,\n message=f'default \"{request.data[\"default\"]}\" is invalid for type {request.data[\"type\"]}',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (InvalidTypeOptionError, InvalidTypeParameters) as e:\n type_options = request.data.get('type_options', '')\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=f'parameter dict {type_options} is invalid for type {request.data[\"type\"]}',\n 
field=\"type_options\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidTypeError as e:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n e,\n message='This type casting is invalid.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n dj_column = Column(\n table=table,\n attnum=get_column_attnum_from_name(table.oid, column.name, table.schema._sa_engine),\n **serializer.validated_model_fields\n )\n dj_column.save()\n out_serializer = ColumnSerializer(dj_column)\n return Response(out_serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n column_instance = self.get_object()\n table = column_instance.table\n serializer = ColumnSerializer(instance=column_instance, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"error\", category=DynamicDefaultWarning)\n try:\n table.alter_column(column_instance._sa_column.column_attnum, serializer.validated_data)\n except UndefinedFunction as e:\n raise database_api_exceptions.UndefinedFunctionAPIException(\n e,\n message='This type cast is not implemented',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except ProgrammingError as e:\n raise database_base_api_exceptions.ProgrammingAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except IndexError as e:\n raise base_api_exceptions.NotFoundAPIException(e)\n except TypeError as e:\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=\"Unknown type_option passed\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidDefaultError as e:\n raise database_api_exceptions.InvalidDefaultAPIException(\n e,\n message=f'default \"{request.data[\"default\"]}\" is invalid for this column',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except DynamicDefaultWarning as e:\n raise database_api_exceptions.DynamicDefaultAPIException(\n e,\n message='Changing type of columns with dynamically-generated defaults is not supported.'\n 'Delete or change the default first.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (InvalidTypeOptionError, InvalidTypeParameters) as e:\n type_options = request.data.get('type_options', '')\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=f'parameter dict {type_options} is invalid for type {request.data[\"type\"]}',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidTypeError as e:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n e,\n message='This type casting is invalid.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except Exception as e:\n raise base_api_exceptions.MathesarAPIException(e)\n\n serializer.update(column_instance, serializer.validated_model_fields)\n # Invalidate the cache as the underlying columns have changed\n column_instance = self.get_object()\n out_serializer = ColumnSerializer(column_instance)\n return Response(out_serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n column_instance = self.get_object()\n table = column_instance.table\n try:\n table.drop_column(column_instance.attnum)\n column_instance.delete()\n except IndexError:\n raise NotFound\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "path": "mathesar/api/db/viewsets/columns.py"}]} | 3,885 | 435 |
gh_patches_debug_20700 | rasdani/github-patches | git_diff | learningequality__kolibri-8371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unsupported browser template does not use theme styling
### Observed behavior
The unsupported browser template uses static css in the template - mostly this is fine, but the colours used in there are Kolibri default theme colours, not those drawn from the currently loaded theme plugin.
### Expected behavior
Should use the themes defined by the currently active theme hook.
### User-facing consequences
Custom themes might look odd for unsupported browsers.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/views.py`
Content:
```
1 from django.contrib.auth import logout
2 from django.core.urlresolvers import reverse
3 from django.http import Http404
4 from django.http import HttpResponse
5 from django.http import HttpResponseRedirect
6 from django.shortcuts import redirect
7 from django.urls import is_valid_path
8 from django.urls import translate_url
9 from django.utils.decorators import method_decorator
10 from django.utils.six.moves.urllib.parse import urlsplit
11 from django.utils.six.moves.urllib.parse import urlunsplit
12 from django.utils.translation import check_for_language
13 from django.utils.translation import LANGUAGE_SESSION_KEY
14 from django.utils.translation import ugettext_lazy as _
15 from django.views.decorators.http import require_POST
16 from django.views.generic.base import TemplateView
17 from django.views.generic.base import View
18 from django.views.i18n import LANGUAGE_QUERY_PARAMETER
19 from django.views.static import serve
20
21 from kolibri.core.auth.constants import user_kinds
22 from kolibri.core.auth.models import Role
23 from kolibri.core.decorators import cache_no_user_data
24 from kolibri.core.device.hooks import SetupHook
25 from kolibri.core.device.translation import get_accept_headers_language
26 from kolibri.core.device.translation import get_device_language
27 from kolibri.core.device.translation import get_settings_language
28 from kolibri.core.device.utils import allow_guest_access
29 from kolibri.core.device.utils import device_provisioned
30 from kolibri.core.hooks import LogoutRedirectHook
31 from kolibri.core.hooks import RoleBasedRedirectHook
32
33
34 # Modified from django.views.i18n
35 @require_POST
36 def set_language(request):
37 """
38 Since this view changes how the user will see the rest of the site, it must
39 only be accessed as a POST request. If called as a GET request, it will
40 error.
41 """
42 lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)
43 next_url = urlsplit(request.POST.get("next")) if request.POST.get("next") else None
44 if lang_code and check_for_language(lang_code):
45 if next_url and is_valid_path(next_url.path):
46 # If it is a recognized Kolibri path, then translate it to the new language and return it.
47 next_path = urlunsplit(
48 (
49 next_url[0],
50 next_url[1],
51 translate_url(next_url[2], lang_code),
52 next_url[3],
53 next_url[4],
54 )
55 )
56 else:
57 next_path = translate_url(reverse("kolibri:core:redirect_user"), lang_code)
58 response = HttpResponse(next_path)
59 if hasattr(request, "session"):
60 request.session[LANGUAGE_SESSION_KEY] = lang_code
61 else:
62 lang_code = (
63 get_device_language()
64 or get_accept_headers_language(request)
65 or get_settings_language()
66 )
67 if next_url and is_valid_path(next_url.path):
68 # If it is a recognized Kolibri path, then translate it using the default language code for this device
69 next_path = urlunsplit(
70 (
71 next_url[0],
72 next_url[1],
73 translate_url(next_url[2], lang_code),
74 next_url[3],
75 next_url[4],
76 )
77 )
78 else:
79 next_path = translate_url(reverse("kolibri:core:redirect_user"), lang_code)
80 response = HttpResponse(next_path)
81 if hasattr(request, "session"):
82 request.session.pop(LANGUAGE_SESSION_KEY, "")
83 return response
84
85
86 def logout_view(request):
87 logout(request)
88 if LogoutRedirectHook.is_enabled():
89 return HttpResponseRedirect(
90 next(obj.url for obj in LogoutRedirectHook.registered_hooks)
91 )
92 return HttpResponseRedirect(reverse("kolibri:core:redirect_user"))
93
94
95 def get_urls_by_role(role):
96 for hook in RoleBasedRedirectHook.registered_hooks:
97 if role in hook.roles:
98 yield hook.url
99
100
101 def get_url_by_role(role):
102 obj = next(
103 (hook for hook in RoleBasedRedirectHook.registered_hooks if role in hook.roles),
104 None,
105 )
106
107 if obj:
108 return obj.url
109
110
111 class GuestRedirectView(View):
112 def get(self, request):
113 """
114 Redirects a guest user to a learner accessible page.
115 """
116 if allow_guest_access():
117 return HttpResponseRedirect(get_url_by_role(user_kinds.LEARNER))
118 return RootURLRedirectView.as_view()(request)
119
120
121 device_is_provisioned = False
122
123
124 def is_provisioned():
125 # First check if the device has been provisioned
126 global device_is_provisioned
127 device_is_provisioned = device_is_provisioned or device_provisioned()
128 return device_is_provisioned
129
130
131 class RootURLRedirectView(View):
132 def get(self, request):
133 """
134 Redirects user based on the highest role they have for which a redirect is defined.
135 """
136 # If it has not been provisioned and we have something that can handle setup, redirect there.
137 if not is_provisioned() and SetupHook.provision_url:
138 return redirect(SetupHook.provision_url())
139
140 if request.user.is_authenticated():
141 url = None
142 if request.user.is_superuser:
143 url = url or get_url_by_role(user_kinds.SUPERUSER)
144 roles = set(
145 Role.objects.filter(user_id=request.user.id)
146 .values_list("kind", flat=True)
147 .distinct()
148 )
149 if user_kinds.ADMIN in roles:
150 url = url or get_url_by_role(user_kinds.ADMIN)
151 if user_kinds.COACH in roles or user_kinds.ASSIGNABLE_COACH in roles:
152 url = url or get_url_by_role(user_kinds.COACH)
153 url = url or get_url_by_role(user_kinds.LEARNER)
154 else:
155 url = get_url_by_role(user_kinds.ANONYMOUS)
156 if url:
157 return HttpResponseRedirect(url)
158 raise Http404(
159 _(
160 "No appropriate redirect pages found. It is likely that Kolibri is badly configured"
161 )
162 )
163
164
165 @method_decorator(cache_no_user_data, name="dispatch")
166 class UnsupportedBrowserView(TemplateView):
167 template_name = "kolibri/unsupported_browser.html"
168
169
170 class StatusCheckView(View):
171 def get(self, request):
172 """
173 Confirms that the server is up
174 """
175 return HttpResponse()
176
177
178 def static_serve_with_fallbacks(search_paths):
179 """
180 Serve a static file by iterating over search_paths until a matching file is found.
181 If a matching file is not found on any of the paths, a 404 will be raised.
182 """
183
184 def serve_func(request, path, document_root=None):
185
186 for search_path in search_paths:
187 try:
188 return serve(request, path, document_root=search_path)
189 except Http404:
190 pass
191
192 # allow the Http404 to be raised, since we couldn't find the file anywhere
193 return serve(request, path, document_root=search_paths[0])
194
195 return serve_func
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/views.py b/kolibri/core/views.py
--- a/kolibri/core/views.py
+++ b/kolibri/core/views.py
@@ -29,6 +29,10 @@
from kolibri.core.device.utils import device_provisioned
from kolibri.core.hooks import LogoutRedirectHook
from kolibri.core.hooks import RoleBasedRedirectHook
+from kolibri.core.theme_hook import BRAND_COLORS
+from kolibri.core.theme_hook import COLOR_V400
+from kolibri.core.theme_hook import PRIMARY
+from kolibri.core.theme_hook import ThemeHook
# Modified from django.views.i18n
@@ -166,6 +170,16 @@
class UnsupportedBrowserView(TemplateView):
template_name = "kolibri/unsupported_browser.html"
+ def get_context_data(self, **kwargs):
+ context = super(UnsupportedBrowserView, self).get_context_data(**kwargs)
+ context["brand_primary_v400"] = (
+ ThemeHook.get_theme()
+ .get(BRAND_COLORS, {})
+ .get(PRIMARY, {})
+ .get(COLOR_V400, "purple")
+ )
+ return context
+
class StatusCheckView(View):
def get(self, request):
| {"golden_diff": "diff --git a/kolibri/core/views.py b/kolibri/core/views.py\n--- a/kolibri/core/views.py\n+++ b/kolibri/core/views.py\n@@ -29,6 +29,10 @@\n from kolibri.core.device.utils import device_provisioned\n from kolibri.core.hooks import LogoutRedirectHook\n from kolibri.core.hooks import RoleBasedRedirectHook\n+from kolibri.core.theme_hook import BRAND_COLORS\n+from kolibri.core.theme_hook import COLOR_V400\n+from kolibri.core.theme_hook import PRIMARY\n+from kolibri.core.theme_hook import ThemeHook\n \n \n # Modified from django.views.i18n\n@@ -166,6 +170,16 @@\n class UnsupportedBrowserView(TemplateView):\n template_name = \"kolibri/unsupported_browser.html\"\n \n+ def get_context_data(self, **kwargs):\n+ context = super(UnsupportedBrowserView, self).get_context_data(**kwargs)\n+ context[\"brand_primary_v400\"] = (\n+ ThemeHook.get_theme()\n+ .get(BRAND_COLORS, {})\n+ .get(PRIMARY, {})\n+ .get(COLOR_V400, \"purple\")\n+ )\n+ return context\n+\n \n class StatusCheckView(View):\n def get(self, request):\n", "issue": "Unsupported browser template does not use theme styling\n### Observed behavior\r\nThe unsupported browser template uses static css in the template - mostly this is fine, but the colours used in there are Kolibri default theme colours, not those drawn from the currently loaded theme plugin.\r\n\r\n### Expected behavior\r\nShould use the themes defined by the currently active theme hook.\r\n\r\n### User-facing consequences\r\nCustom themes might look odd for unsupported browsers.\r\n\n", "before_files": [{"content": "from django.contrib.auth import logout\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import is_valid_path\nfrom django.urls import translate_url\nfrom django.utils.decorators import method_decorator\nfrom django.utils.six.moves.urllib.parse import urlsplit\nfrom django.utils.six.moves.urllib.parse import urlunsplit\nfrom django.utils.translation import check_for_language\nfrom django.utils.translation import LANGUAGE_SESSION_KEY\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.base import View\nfrom django.views.i18n import LANGUAGE_QUERY_PARAMETER\nfrom django.views.static import serve\n\nfrom kolibri.core.auth.constants import user_kinds\nfrom kolibri.core.auth.models import Role\nfrom kolibri.core.decorators import cache_no_user_data\nfrom kolibri.core.device.hooks import SetupHook\nfrom kolibri.core.device.translation import get_accept_headers_language\nfrom kolibri.core.device.translation import get_device_language\nfrom kolibri.core.device.translation import get_settings_language\nfrom kolibri.core.device.utils import allow_guest_access\nfrom kolibri.core.device.utils import device_provisioned\nfrom kolibri.core.hooks import LogoutRedirectHook\nfrom kolibri.core.hooks import RoleBasedRedirectHook\n\n\n# Modified from django.views.i18n\n@require_POST\ndef set_language(request):\n \"\"\"\n Since this view changes how the user will see the rest of the site, it must\n only be accessed as a POST request. 
If called as a GET request, it will\n error.\n \"\"\"\n lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)\n next_url = urlsplit(request.POST.get(\"next\")) if request.POST.get(\"next\") else None\n if lang_code and check_for_language(lang_code):\n if next_url and is_valid_path(next_url.path):\n # If it is a recognized Kolibri path, then translate it to the new language and return it.\n next_path = urlunsplit(\n (\n next_url[0],\n next_url[1],\n translate_url(next_url[2], lang_code),\n next_url[3],\n next_url[4],\n )\n )\n else:\n next_path = translate_url(reverse(\"kolibri:core:redirect_user\"), lang_code)\n response = HttpResponse(next_path)\n if hasattr(request, \"session\"):\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n else:\n lang_code = (\n get_device_language()\n or get_accept_headers_language(request)\n or get_settings_language()\n )\n if next_url and is_valid_path(next_url.path):\n # If it is a recognized Kolibri path, then translate it using the default language code for this device\n next_path = urlunsplit(\n (\n next_url[0],\n next_url[1],\n translate_url(next_url[2], lang_code),\n next_url[3],\n next_url[4],\n )\n )\n else:\n next_path = translate_url(reverse(\"kolibri:core:redirect_user\"), lang_code)\n response = HttpResponse(next_path)\n if hasattr(request, \"session\"):\n request.session.pop(LANGUAGE_SESSION_KEY, \"\")\n return response\n\n\ndef logout_view(request):\n logout(request)\n if LogoutRedirectHook.is_enabled():\n return HttpResponseRedirect(\n next(obj.url for obj in LogoutRedirectHook.registered_hooks)\n )\n return HttpResponseRedirect(reverse(\"kolibri:core:redirect_user\"))\n\n\ndef get_urls_by_role(role):\n for hook in RoleBasedRedirectHook.registered_hooks:\n if role in hook.roles:\n yield hook.url\n\n\ndef get_url_by_role(role):\n obj = next(\n (hook for hook in RoleBasedRedirectHook.registered_hooks if role in hook.roles),\n None,\n )\n\n if obj:\n return obj.url\n\n\nclass GuestRedirectView(View):\n def get(self, request):\n \"\"\"\n Redirects a guest user to a learner accessible page.\n \"\"\"\n if allow_guest_access():\n return HttpResponseRedirect(get_url_by_role(user_kinds.LEARNER))\n return RootURLRedirectView.as_view()(request)\n\n\ndevice_is_provisioned = False\n\n\ndef is_provisioned():\n # First check if the device has been provisioned\n global device_is_provisioned\n device_is_provisioned = device_is_provisioned or device_provisioned()\n return device_is_provisioned\n\n\nclass RootURLRedirectView(View):\n def get(self, request):\n \"\"\"\n Redirects user based on the highest role they have for which a redirect is defined.\n \"\"\"\n # If it has not been provisioned and we have something that can handle setup, redirect there.\n if not is_provisioned() and SetupHook.provision_url:\n return redirect(SetupHook.provision_url())\n\n if request.user.is_authenticated():\n url = None\n if request.user.is_superuser:\n url = url or get_url_by_role(user_kinds.SUPERUSER)\n roles = set(\n Role.objects.filter(user_id=request.user.id)\n .values_list(\"kind\", flat=True)\n .distinct()\n )\n if user_kinds.ADMIN in roles:\n url = url or get_url_by_role(user_kinds.ADMIN)\n if user_kinds.COACH in roles or user_kinds.ASSIGNABLE_COACH in roles:\n url = url or get_url_by_role(user_kinds.COACH)\n url = url or get_url_by_role(user_kinds.LEARNER)\n else:\n url = get_url_by_role(user_kinds.ANONYMOUS)\n if url:\n return HttpResponseRedirect(url)\n raise Http404(\n _(\n \"No appropriate redirect pages found. 
It is likely that Kolibri is badly configured\"\n )\n )\n\n\n@method_decorator(cache_no_user_data, name=\"dispatch\")\nclass UnsupportedBrowserView(TemplateView):\n template_name = \"kolibri/unsupported_browser.html\"\n\n\nclass StatusCheckView(View):\n def get(self, request):\n \"\"\"\n Confirms that the server is up\n \"\"\"\n return HttpResponse()\n\n\ndef static_serve_with_fallbacks(search_paths):\n \"\"\"\n Serve a static file by iterating over search_paths until a matching file is found.\n If a matching file is not found on any of the paths, a 404 will be raised.\n \"\"\"\n\n def serve_func(request, path, document_root=None):\n\n for search_path in search_paths:\n try:\n return serve(request, path, document_root=search_path)\n except Http404:\n pass\n\n # allow the Http404 to be raised, since we couldn't find the file anywhere\n return serve(request, path, document_root=search_paths[0])\n\n return serve_func\n", "path": "kolibri/core/views.py"}], "after_files": [{"content": "from django.contrib.auth import logout\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import is_valid_path\nfrom django.urls import translate_url\nfrom django.utils.decorators import method_decorator\nfrom django.utils.six.moves.urllib.parse import urlsplit\nfrom django.utils.six.moves.urllib.parse import urlunsplit\nfrom django.utils.translation import check_for_language\nfrom django.utils.translation import LANGUAGE_SESSION_KEY\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.base import View\nfrom django.views.i18n import LANGUAGE_QUERY_PARAMETER\nfrom django.views.static import serve\n\nfrom kolibri.core.auth.constants import user_kinds\nfrom kolibri.core.auth.models import Role\nfrom kolibri.core.decorators import cache_no_user_data\nfrom kolibri.core.device.hooks import SetupHook\nfrom kolibri.core.device.translation import get_accept_headers_language\nfrom kolibri.core.device.translation import get_device_language\nfrom kolibri.core.device.translation import get_settings_language\nfrom kolibri.core.device.utils import allow_guest_access\nfrom kolibri.core.device.utils import device_provisioned\nfrom kolibri.core.hooks import LogoutRedirectHook\nfrom kolibri.core.hooks import RoleBasedRedirectHook\nfrom kolibri.core.theme_hook import BRAND_COLORS\nfrom kolibri.core.theme_hook import COLOR_V400\nfrom kolibri.core.theme_hook import PRIMARY\nfrom kolibri.core.theme_hook import ThemeHook\n\n\n# Modified from django.views.i18n\n@require_POST\ndef set_language(request):\n \"\"\"\n Since this view changes how the user will see the rest of the site, it must\n only be accessed as a POST request. 
If called as a GET request, it will\n error.\n \"\"\"\n lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)\n next_url = urlsplit(request.POST.get(\"next\")) if request.POST.get(\"next\") else None\n if lang_code and check_for_language(lang_code):\n if next_url and is_valid_path(next_url.path):\n # If it is a recognized Kolibri path, then translate it to the new language and return it.\n next_path = urlunsplit(\n (\n next_url[0],\n next_url[1],\n translate_url(next_url[2], lang_code),\n next_url[3],\n next_url[4],\n )\n )\n else:\n next_path = translate_url(reverse(\"kolibri:core:redirect_user\"), lang_code)\n response = HttpResponse(next_path)\n if hasattr(request, \"session\"):\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n else:\n lang_code = (\n get_device_language()\n or get_accept_headers_language(request)\n or get_settings_language()\n )\n if next_url and is_valid_path(next_url.path):\n # If it is a recognized Kolibri path, then translate it using the default language code for this device\n next_path = urlunsplit(\n (\n next_url[0],\n next_url[1],\n translate_url(next_url[2], lang_code),\n next_url[3],\n next_url[4],\n )\n )\n else:\n next_path = translate_url(reverse(\"kolibri:core:redirect_user\"), lang_code)\n response = HttpResponse(next_path)\n if hasattr(request, \"session\"):\n request.session.pop(LANGUAGE_SESSION_KEY, \"\")\n return response\n\n\ndef logout_view(request):\n logout(request)\n if LogoutRedirectHook.is_enabled():\n return HttpResponseRedirect(\n next(obj.url for obj in LogoutRedirectHook.registered_hooks)\n )\n return HttpResponseRedirect(reverse(\"kolibri:core:redirect_user\"))\n\n\ndef get_urls_by_role(role):\n for hook in RoleBasedRedirectHook.registered_hooks:\n if role in hook.roles:\n yield hook.url\n\n\ndef get_url_by_role(role):\n obj = next(\n (hook for hook in RoleBasedRedirectHook.registered_hooks if role in hook.roles),\n None,\n )\n\n if obj:\n return obj.url\n\n\nclass GuestRedirectView(View):\n def get(self, request):\n \"\"\"\n Redirects a guest user to a learner accessible page.\n \"\"\"\n if allow_guest_access():\n return HttpResponseRedirect(get_url_by_role(user_kinds.LEARNER))\n return RootURLRedirectView.as_view()(request)\n\n\ndevice_is_provisioned = False\n\n\ndef is_provisioned():\n # First check if the device has been provisioned\n global device_is_provisioned\n device_is_provisioned = device_is_provisioned or device_provisioned()\n return device_is_provisioned\n\n\nclass RootURLRedirectView(View):\n def get(self, request):\n \"\"\"\n Redirects user based on the highest role they have for which a redirect is defined.\n \"\"\"\n # If it has not been provisioned and we have something that can handle setup, redirect there.\n if not is_provisioned() and SetupHook.provision_url:\n return redirect(SetupHook.provision_url())\n\n if request.user.is_authenticated():\n url = None\n if request.user.is_superuser:\n url = url or get_url_by_role(user_kinds.SUPERUSER)\n roles = set(\n Role.objects.filter(user_id=request.user.id)\n .values_list(\"kind\", flat=True)\n .distinct()\n )\n if user_kinds.ADMIN in roles:\n url = url or get_url_by_role(user_kinds.ADMIN)\n if user_kinds.COACH in roles or user_kinds.ASSIGNABLE_COACH in roles:\n url = url or get_url_by_role(user_kinds.COACH)\n url = url or get_url_by_role(user_kinds.LEARNER)\n else:\n url = get_url_by_role(user_kinds.ANONYMOUS)\n if url:\n return HttpResponseRedirect(url)\n raise Http404(\n _(\n \"No appropriate redirect pages found. 
It is likely that Kolibri is badly configured\"\n )\n )\n\n\n@method_decorator(cache_no_user_data, name=\"dispatch\")\nclass UnsupportedBrowserView(TemplateView):\n template_name = \"kolibri/unsupported_browser.html\"\n\n def get_context_data(self, **kwargs):\n context = super(UnsupportedBrowserView, self).get_context_data(**kwargs)\n context[\"brand_primary_v400\"] = (\n ThemeHook.get_theme()\n .get(BRAND_COLORS, {})\n .get(PRIMARY, {})\n .get(COLOR_V400, \"purple\")\n )\n return context\n\n\nclass StatusCheckView(View):\n def get(self, request):\n \"\"\"\n Confirms that the server is up\n \"\"\"\n return HttpResponse()\n\n\ndef static_serve_with_fallbacks(search_paths):\n \"\"\"\n Serve a static file by iterating over search_paths until a matching file is found.\n If a matching file is not found on any of the paths, a 404 will be raised.\n \"\"\"\n\n def serve_func(request, path, document_root=None):\n\n for search_path in search_paths:\n try:\n return serve(request, path, document_root=search_path)\n except Http404:\n pass\n\n # allow the Http404 to be raised, since we couldn't find the file anywhere\n return serve(request, path, document_root=search_paths[0])\n\n return serve_func\n", "path": "kolibri/core/views.py"}]} | 2,278 | 279 |
gh_patches_debug_15954 | rasdani/github-patches | git_diff | Nitrate__Nitrate-352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove django 1.10 support
- Remove django 1.10 testenv from tox.ini
- Update django version in setup.py. Minimum django version is `1.11`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import sys
4
5 from setuptools import setup, find_packages
6
7
8 with open('VERSION.txt', 'r') as f:
9 pkg_version = f.read().strip()
10
11
12 def get_long_description():
13 with open('README.rst', 'r') as f:
14 return f.read()
15
16
17 install_requires = [
18 'PyMySQL == 0.7.11',
19 'beautifulsoup4 >= 4.1.1',
20 'celery == 4.1.0',
21 'django >= 1.10,<2.0',
22 'django-contrib-comments == 1.8.0',
23 'django-tinymce == 2.7.0',
24 'django-uuslug == 1.1.8',
25 'html2text',
26 'kobo == 0.7.0',
27 'odfpy >= 0.9.6',
28 'python-bugzilla',
29 'six',
30 'xmltodict',
31 ]
32
33 if sys.version_info.major < 3:
34 install_requires += [
35 'enum34',
36 ]
37
38 extras_require = {
39 # Required for tcms.core.contrib.auth.backends.KerberosBackend
40 'krbauth': [
41 'kerberos == 1.2.5'
42 ],
43
44 # Packages for building documentation
45 'docs': [
46 'Sphinx >= 1.1.2',
47 'sphinx_rtd_theme',
48 ],
49
50 # Necessary packages for running tests
51 'tests': [
52 'coverage',
53 'factory_boy',
54 'flake8',
55 'mock',
56 'pytest',
57 'pytest-cov',
58 'pytest-django',
59 ],
60
61 # Contain tools that assists the development
62 'devtools': [
63 'django-debug-toolbar == 1.7',
64 'tox',
65 'django-extensions',
66 'pygraphviz',
67 'future-breakpoint',
68 ]
69 }
70
71
72 setup(
73 name='Nitrate',
74 version=pkg_version,
75 description='Test Case Management System',
76 long_description=get_long_description(),
77 author='Nitrate Team',
78 maintainer='Chenxiong Qi',
79 maintainer_email='[email protected]',
80 url='https://github.com/Nitrate/Nitrate/',
81 license='GPLv2+',
82 keywords='test case',
83 install_requires=install_requires,
84 extras_require=extras_require,
85 packages=find_packages(),
86 include_package_data=True,
87 classifiers=[
88 'Framework :: Django',
89 'Framework :: Django :: 1.10',
90 'Framework :: Django :: 1.11',
91 'Intended Audience :: Developers',
92 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
93 'Programming Language :: Python :: 2',
94 'Programming Language :: Python :: 2.7',
95 'Programming Language :: Python :: 3',
96 'Programming Language :: Python :: 3.6',
97 'Topic :: Software Development :: Quality Assurance',
98 'Topic :: Software Development :: Testing',
99 ],
100 )
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
'PyMySQL == 0.7.11',
'beautifulsoup4 >= 4.1.1',
'celery == 4.1.0',
- 'django >= 1.10,<2.0',
+ 'django >= 1.11,<2.0',
'django-contrib-comments == 1.8.0',
'django-tinymce == 2.7.0',
'django-uuslug == 1.1.8',
@@ -86,7 +86,6 @@
include_package_data=True,
classifiers=[
'Framework :: Django',
- 'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n- 'django >= 1.10,<2.0',\n+ 'django >= 1.11,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n@@ -86,7 +86,6 @@\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n- 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n", "issue": "Remove django 1.10 support\n- Remove django 1.10 testenv from tox.ini\r\n- Update django version in setup.py. Minimum django version is `1.11`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django >= 1.10,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django >= 1.11,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 
'six',\n 'xmltodict',\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]} | 1,155 | 222 |
gh_patches_debug_50088 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in comment in conversationbot2.py
<!--
Thanks for reporting issues of python-telegram-bot!
Use this template to notify us if you found a bug, or if you want to request a new feature.
If you're looking for help with programming your bot using our library, feel free to ask your
questions in out telegram group at: https://t.me/pythontelegrambotgroup
To make it easier for us to help you please enter detailed information below.
Please note, we only support the latest version of python-telegram-bot and
master branch. Please make sure to upgrade & recreate the issue on the latest
version prior to opening an issue.
-->
### Steps to reproduce
1. Not really a bug... wrong comment line in conversationbot2.py :)
### Expected behaviour
Should be: # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY
### Actual behaviour
Actual comment in conversationbot2.py (leftover from conversationbot.py I guess :))
# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
### Configuration
**Operating System:**
Windows
**Version of Python, python-telegram-bot & dependencies:**
3.6
``$ python -m telegram``
### Logs
Insert logs here (if necessary)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/conversationbot2.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Simple Bot to reply to Telegram messages
5 # This program is dedicated to the public domain under the CC0 license.
6 """
7 This Bot uses the Updater class to handle the bot.
8
9 First, a few callback functions are defined. Then, those functions are passed to
10 the Dispatcher and registered at their respective places.
11 Then, the bot is started and runs until we press Ctrl-C on the command line.
12
13 Usage:
14 Example of a bot-user conversation using ConversationHandler.
15 Send /start to initiate the conversation.
16 Press Ctrl-C on the command line or send a signal to the process to stop the
17 bot.
18 """
19
20 from telegram import ReplyKeyboardMarkup
21 from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
22 ConversationHandler)
23
24 import logging
25
26 # Enable logging
27 logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
28 level=logging.INFO)
29
30 logger = logging.getLogger(__name__)
31
32 CHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)
33
34 reply_keyboard = [['Age', 'Favourite colour'],
35 ['Number of siblings', 'Something else...'],
36 ['Done']]
37 markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
38
39
40 def facts_to_str(user_data):
41 facts = list()
42
43 for key, value in user_data.items():
44 facts.append('{} - {}'.format(key, value))
45
46 return "\n".join(facts).join(['\n', '\n'])
47
48
49 def start(bot, update):
50 update.message.reply_text(
51 "Hi! My name is Doctor Botter. I will hold a more complex conversation with you. "
52 "Why don't you tell me something about yourself?",
53 reply_markup=markup)
54
55 return CHOOSING
56
57
58 def regular_choice(bot, update, user_data):
59 text = update.message.text
60 user_data['choice'] = text
61 update.message.reply_text(
62 'Your {}? Yes, I would love to hear about that!'.format(text.lower()))
63
64 return TYPING_REPLY
65
66
67 def custom_choice(bot, update):
68 update.message.reply_text('Alright, please send me the category first, '
69 'for example "Most impressive skill"')
70
71 return TYPING_CHOICE
72
73
74 def received_information(bot, update, user_data):
75 text = update.message.text
76 category = user_data['choice']
77 user_data[category] = text
78 del user_data['choice']
79
80 update.message.reply_text("Neat! Just so you know, this is what you already told me:"
81 "{}"
82 "You can tell me more, or change your opinion on something.".format(
83 facts_to_str(user_data)), reply_markup=markup)
84
85 return CHOOSING
86
87
88 def done(bot, update, user_data):
89 if 'choice' in user_data:
90 del user_data['choice']
91
92 update.message.reply_text("I learned these facts about you:"
93 "{}"
94 "Until next time!".format(facts_to_str(user_data)))
95
96 user_data.clear()
97 return ConversationHandler.END
98
99
100 def error(bot, update, error):
101 """Log Errors caused by Updates."""
102 logger.warning('Update "%s" caused error "%s"', update, error)
103
104
105 def main():
106 # Create the Updater and pass it your bot's token.
107 updater = Updater("TOKEN")
108
109 # Get the dispatcher to register handlers
110 dp = updater.dispatcher
111
112 # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
113 conv_handler = ConversationHandler(
114 entry_points=[CommandHandler('start', start)],
115
116 states={
117 CHOOSING: [RegexHandler('^(Age|Favourite colour|Number of siblings)$',
118 regular_choice,
119 pass_user_data=True),
120 RegexHandler('^Something else...$',
121 custom_choice),
122 ],
123
124 TYPING_CHOICE: [MessageHandler(Filters.text,
125 regular_choice,
126 pass_user_data=True),
127 ],
128
129 TYPING_REPLY: [MessageHandler(Filters.text,
130 received_information,
131 pass_user_data=True),
132 ],
133 },
134
135 fallbacks=[RegexHandler('^Done$', done, pass_user_data=True)]
136 )
137
138 dp.add_handler(conv_handler)
139
140 # log all errors
141 dp.add_error_handler(error)
142
143 # Start the Bot
144 updater.start_polling()
145
146 # Run the bot until you press Ctrl-C or the process receives SIGINT,
147 # SIGTERM or SIGABRT. This should be used most of the time, since
148 # start_polling() is non-blocking and will stop the bot gracefully.
149 updater.idle()
150
151
152 if __name__ == '__main__':
153 main()
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/conversationbot2.py b/examples/conversationbot2.py
--- a/examples/conversationbot2.py
+++ b/examples/conversationbot2.py
@@ -109,7 +109,7 @@
# Get the dispatcher to register handlers
dp = updater.dispatcher
- # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
+ # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
| {"golden_diff": "diff --git a/examples/conversationbot2.py b/examples/conversationbot2.py\n--- a/examples/conversationbot2.py\n+++ b/examples/conversationbot2.py\n@@ -109,7 +109,7 @@\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n \n- # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\n+ # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n", "issue": "Typo in comment in conversationbot2.py\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug, or if you want to request a new feature.\r\nIf you're looking for help with programming your bot using our library, feel free to ask your\r\nquestions in out telegram group at: https://t.me/pythontelegrambotgroup\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1. Not really a bug... wrong comment line in conversationbot2.py :)\r\n\r\n### Expected behaviour\r\nShould be: # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY\r\n\r\n### Actual behaviour\r\nActual comment in conversationbot2.py (leftover from conversationbot.py I guess :))\r\n# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\r\n\r\n### Configuration\r\n**Operating System:**\r\nWindows\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n3.6\r\n\r\n``$ python -m telegram``\r\n\r\n### Logs\r\nInsert logs here (if necessary)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Simple Bot to reply to Telegram messages\n# This program is dedicated to the public domain under the CC0 license.\n\"\"\"\nThis Bot uses the Updater class to handle the bot.\n\nFirst, a few callback functions are defined. Then, those functions are passed to\nthe Dispatcher and registered at their respective places.\nThen, the bot is started and runs until we press Ctrl-C on the command line.\n\nUsage:\nExample of a bot-user conversation using ConversationHandler.\nSend /start to initiate the conversation.\nPress Ctrl-C on the command line or send a signal to the process to stop the\nbot.\n\"\"\"\n\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler)\n\nimport logging\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nCHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)\n\nreply_keyboard = [['Age', 'Favourite colour'],\n ['Number of siblings', 'Something else...'],\n ['Done']]\nmarkup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)\n\n\ndef facts_to_str(user_data):\n facts = list()\n\n for key, value in user_data.items():\n facts.append('{} - {}'.format(key, value))\n\n return \"\\n\".join(facts).join(['\\n', '\\n'])\n\n\ndef start(bot, update):\n update.message.reply_text(\n \"Hi! My name is Doctor Botter. I will hold a more complex conversation with you. 
\"\n \"Why don't you tell me something about yourself?\",\n reply_markup=markup)\n\n return CHOOSING\n\n\ndef regular_choice(bot, update, user_data):\n text = update.message.text\n user_data['choice'] = text\n update.message.reply_text(\n 'Your {}? Yes, I would love to hear about that!'.format(text.lower()))\n\n return TYPING_REPLY\n\n\ndef custom_choice(bot, update):\n update.message.reply_text('Alright, please send me the category first, '\n 'for example \"Most impressive skill\"')\n\n return TYPING_CHOICE\n\n\ndef received_information(bot, update, user_data):\n text = update.message.text\n category = user_data['choice']\n user_data[category] = text\n del user_data['choice']\n\n update.message.reply_text(\"Neat! Just so you know, this is what you already told me:\"\n \"{}\"\n \"You can tell me more, or change your opinion on something.\".format(\n facts_to_str(user_data)), reply_markup=markup)\n\n return CHOOSING\n\n\ndef done(bot, update, user_data):\n if 'choice' in user_data:\n del user_data['choice']\n\n update.message.reply_text(\"I learned these facts about you:\"\n \"{}\"\n \"Until next time!\".format(facts_to_str(user_data)))\n\n user_data.clear()\n return ConversationHandler.END\n\n\ndef error(bot, update, error):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\n\ndef main():\n # Create the Updater and pass it your bot's token.\n updater = Updater(\"TOKEN\")\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n\n states={\n CHOOSING: [RegexHandler('^(Age|Favourite colour|Number of siblings)$',\n regular_choice,\n pass_user_data=True),\n RegexHandler('^Something else...$',\n custom_choice),\n ],\n\n TYPING_CHOICE: [MessageHandler(Filters.text,\n regular_choice,\n pass_user_data=True),\n ],\n\n TYPING_REPLY: [MessageHandler(Filters.text,\n received_information,\n pass_user_data=True),\n ],\n },\n\n fallbacks=[RegexHandler('^Done$', done, pass_user_data=True)]\n )\n\n dp.add_handler(conv_handler)\n\n # log all errors\n dp.add_error_handler(error)\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/conversationbot2.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Simple Bot to reply to Telegram messages\n# This program is dedicated to the public domain under the CC0 license.\n\"\"\"\nThis Bot uses the Updater class to handle the bot.\n\nFirst, a few callback functions are defined. 
Then, those functions are passed to\nthe Dispatcher and registered at their respective places.\nThen, the bot is started and runs until we press Ctrl-C on the command line.\n\nUsage:\nExample of a bot-user conversation using ConversationHandler.\nSend /start to initiate the conversation.\nPress Ctrl-C on the command line or send a signal to the process to stop the\nbot.\n\"\"\"\n\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler)\n\nimport logging\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nCHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)\n\nreply_keyboard = [['Age', 'Favourite colour'],\n ['Number of siblings', 'Something else...'],\n ['Done']]\nmarkup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)\n\n\ndef facts_to_str(user_data):\n facts = list()\n\n for key, value in user_data.items():\n facts.append('{} - {}'.format(key, value))\n\n return \"\\n\".join(facts).join(['\\n', '\\n'])\n\n\ndef start(bot, update):\n update.message.reply_text(\n \"Hi! My name is Doctor Botter. I will hold a more complex conversation with you. \"\n \"Why don't you tell me something about yourself?\",\n reply_markup=markup)\n\n return CHOOSING\n\n\ndef regular_choice(bot, update, user_data):\n text = update.message.text\n user_data['choice'] = text\n update.message.reply_text(\n 'Your {}? Yes, I would love to hear about that!'.format(text.lower()))\n\n return TYPING_REPLY\n\n\ndef custom_choice(bot, update):\n update.message.reply_text('Alright, please send me the category first, '\n 'for example \"Most impressive skill\"')\n\n return TYPING_CHOICE\n\n\ndef received_information(bot, update, user_data):\n text = update.message.text\n category = user_data['choice']\n user_data[category] = text\n del user_data['choice']\n\n update.message.reply_text(\"Neat! 
Just so you know, this is what you already told me:\"\n \"{}\"\n \"You can tell me more, or change your opinion on something.\".format(\n facts_to_str(user_data)), reply_markup=markup)\n\n return CHOOSING\n\n\ndef done(bot, update, user_data):\n if 'choice' in user_data:\n del user_data['choice']\n\n update.message.reply_text(\"I learned these facts about you:\"\n \"{}\"\n \"Until next time!\".format(facts_to_str(user_data)))\n\n user_data.clear()\n return ConversationHandler.END\n\n\ndef error(bot, update, error):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\n\ndef main():\n # Create the Updater and pass it your bot's token.\n updater = Updater(\"TOKEN\")\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n\n states={\n CHOOSING: [RegexHandler('^(Age|Favourite colour|Number of siblings)$',\n regular_choice,\n pass_user_data=True),\n RegexHandler('^Something else...$',\n custom_choice),\n ],\n\n TYPING_CHOICE: [MessageHandler(Filters.text,\n regular_choice,\n pass_user_data=True),\n ],\n\n TYPING_REPLY: [MessageHandler(Filters.text,\n received_information,\n pass_user_data=True),\n ],\n },\n\n fallbacks=[RegexHandler('^Done$', done, pass_user_data=True)]\n )\n\n dp.add_handler(conv_handler)\n\n # log all errors\n dp.add_error_handler(error)\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/conversationbot2.py"}]} | 1,902 | 125 |
gh_patches_debug_18263 | rasdani/github-patches | git_diff | kubeflow__pipelines-4130 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow output artifact store configuration (vs hard coded)
it seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`).
see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148
it would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.
i suggest making it configurable, i can do such PR if we agree its needed.
flexible pipeline service (host) path in client SDK
when creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
also note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug
if its acceptable i can submit a PR for the line change above
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/components/_python_to_graph_component.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 __all__ = [
16 'create_graph_component_from_pipeline_func',
17 ]
18
19
20 import inspect
21 from collections import OrderedDict
22 from typing import Callable
23
24 from . import _components
25 from ._structures import TaskSpec, ComponentSpec, OutputSpec, GraphInputReference, TaskOutputArgument, GraphImplementation, GraphSpec
26 from ._naming import _make_name_unique_by_adding_index
27 from ._python_op import _extract_component_interface
28 from ._components import _create_task_factory_from_component_spec
29
30
31 def create_graph_component_from_pipeline_func(
32 pipeline_func: Callable,
33 output_component_file: str = None,
34 embed_component_specs: bool = False,
35 ) -> Callable:
36 '''Experimental! Creates graph component definition from a python pipeline function. The component file can be published for sharing.
37 Pipeline function is a function that only calls component functions and passes outputs to inputs.
38 This feature is experimental and lacks support for some of the DSL features like conditions and loops.
39 Only pipelines consisting of loaded components or python components are currently supported (no manually created ContainerOps or ResourceOps).
40
41 Args:
42 pipeline_func: Python function to convert
43 output_component_file: Path of the file where the component definition will be written. The `component.yaml` file can then be published for sharing.
44 embed_component_specs: Whether to embed component definitions or just reference them. Embedding makes the graph component self-contained. Default is False.
45
46 Returns:
47 A function representing the graph component. The component spec can be accessed using the .component_spec attribute.
48 The function will have the same parameters as the original function.
49 When called, the function will return a task object, corresponding to the graph component.
50 To reference the outputs of the task, use task.outputs["Output name"].
51
52 Example:
53
54 producer_op = load_component_from_file('producer/component.yaml')
55 processor_op = load_component_from_file('processor/component.yaml')
56
57 def pipeline1(pipeline_param_1: int):
58 producer_task = producer_op()
59 processor_task = processor_op(pipeline_param_1, producer_task.outputs['Output 2'])
60
61 return OrderedDict([
62 ('Pipeline output 1', producer_task.outputs['Output 1']),
63 ('Pipeline output 2', processor_task.outputs['Output 2']),
64 ])
65
66 create_graph_component_from_pipeline_func(pipeline1, output_component_file='pipeline.component.yaml')
67 '''
68 component_spec = create_graph_component_spec_from_pipeline_func(pipeline_func, embed_component_specs)
69 if output_component_file:
70 from pathlib import Path
71 from ._yaml_utils import dump_yaml
72 component_dict = component_spec.to_dict()
73 component_yaml = dump_yaml(component_dict)
74 Path(output_component_file).write_text(component_yaml)
75
76 return _create_task_factory_from_component_spec(component_spec)
77
78 def create_graph_component_spec_from_pipeline_func(pipeline_func: Callable, embed_component_specs: bool = False) -> ComponentSpec:
79
80 component_spec = _extract_component_interface(pipeline_func)
81 # Checking the function parameters - they should not have file passing annotations.
82 input_specs = component_spec.inputs or []
83 for input in input_specs:
84 if input._passing_style:
85 raise TypeError('Graph component function parameter "{}" cannot have file-passing annotation "{}".'.format(input.name, input._passing_style))
86
87 task_map = OrderedDict() #Preserving task order
88
89 from ._components import _create_task_spec_from_component_and_arguments
90 def task_construction_handler(
91 component_spec,
92 arguments,
93 component_ref,
94 ):
95 task = _create_task_spec_from_component_and_arguments(
96 component_spec=component_spec,
97 arguments=arguments,
98 component_ref=component_ref,
99 )
100
101 #Rewriting task ids so that they're same every time
102 task_id = task.component_ref.spec.name or "Task"
103 task_id = _make_name_unique_by_adding_index(task_id, task_map.keys(), ' ')
104 for output_ref in task.outputs.values():
105 output_ref.task_output.task_id = task_id
106 output_ref.task_output.task = None
107 task_map[task_id] = task
108 # Remove the component spec from component reference unless it will make the reference empty or unless explicitly asked by the user
109 if not embed_component_specs and any([task.component_ref.name, task.component_ref.url, task.component_ref.digest]):
110 task.component_ref.spec = None
111
112 return task #The handler is a transformation function, so it must pass the task through.
113
114 # Preparing the pipeline_func arguments
115 # TODO: The key should be original parameter name if different
116 pipeline_func_args = {input.name: GraphInputReference(input_name=input.name).as_argument() for input in input_specs}
117
118 try:
119 #Setting the handler to fix and catch the tasks.
120 # FIX: The handler only hooks container component creation
121 old_handler = _components._container_task_constructor
122 _components._container_task_constructor = task_construction_handler
123
124 #Calling the pipeline_func with GraphInputArgument instances as arguments
125 pipeline_func_result = pipeline_func(**pipeline_func_args)
126 finally:
127 _components._container_task_constructor = old_handler
128
129
130 # Getting graph outputs
131 output_names = [output.name for output in (component_spec.outputs or [])]
132
133 if len(output_names) == 1 and output_names[0] == 'Output': # TODO: Check whether the NamedTuple syntax was used
134 pipeline_func_result = [pipeline_func_result]
135
136 if isinstance(pipeline_func_result, tuple) and hasattr(pipeline_func_result, '_asdict'): # collections.namedtuple and typing.NamedTuple
137 pipeline_func_result = pipeline_func_result._asdict()
138
139 if isinstance(pipeline_func_result, dict):
140 if output_names:
141 if set(output_names) != set(pipeline_func_result.keys()):
142 raise ValueError('Returned outputs do not match outputs specified in the function signature: {} = {}'.format(str(set(pipeline_func_result.keys())), str(set(output_names))))
143
144 if pipeline_func_result is None:
145 graph_output_value_map = {}
146 elif isinstance(pipeline_func_result, dict):
147 graph_output_value_map = OrderedDict(pipeline_func_result)
148 elif isinstance(pipeline_func_result, (list, tuple)):
149 if output_names:
150 if len(pipeline_func_result) != len(output_names):
151 raise ValueError('Expected {} values from pipeline function, but got {}.'.format(len(output_names), len(pipeline_func_result)))
152 graph_output_value_map = OrderedDict((name_value[0], name_value[1]) for name_value in zip(output_names, pipeline_func_result))
153 else:
154 graph_output_value_map = OrderedDict((output_value.task_output.output_name, output_value) for output_value in pipeline_func_result) # TODO: Fix possible name non-uniqueness (e.g. use task id as prefix or add index to non-unique names)
155 else:
156 raise TypeError('Pipeline must return outputs as tuple or OrderedDict.')
157
158 #Checking the pipeline_func output object types
159 for output_name, output_value in graph_output_value_map.items():
160 if not isinstance(output_value, TaskOutputArgument):
161 raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got "{output_name}" = "{}".'.format(output_name, str(output_value)))
162
163 if not component_spec.outputs and graph_output_value_map:
164 component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]
165
166 component_spec.implementation = GraphImplementation(
167 graph=GraphSpec(
168 tasks=task_map,
169 output_values=graph_output_value_map,
170 )
171 )
172 return component_spec
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/kfp/components/_python_to_graph_component.py b/sdk/python/kfp/components/_python_to_graph_component.py
--- a/sdk/python/kfp/components/_python_to_graph_component.py
+++ b/sdk/python/kfp/components/_python_to_graph_component.py
@@ -158,7 +158,7 @@
#Checking the pipeline_func output object types
for output_name, output_value in graph_output_value_map.items():
if not isinstance(output_value, TaskOutputArgument):
- raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got "{output_name}" = "{}".'.format(output_name, str(output_value)))
+ raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got "{}" = "{}".'.format(output_name, str(output_value)))
if not component_spec.outputs and graph_output_value_map:
component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]
| {"golden_diff": "diff --git a/sdk/python/kfp/components/_python_to_graph_component.py b/sdk/python/kfp/components/_python_to_graph_component.py\n--- a/sdk/python/kfp/components/_python_to_graph_component.py\n+++ b/sdk/python/kfp/components/_python_to_graph_component.py\n@@ -158,7 +158,7 @@\n #Checking the pipeline_func output object types\n for output_name, output_value in graph_output_value_map.items():\n if not isinstance(output_value, TaskOutputArgument):\n- raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got \"{output_name}\" = \"{}\".'.format(output_name, str(output_value)))\n+ raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got \"{}\" = \"{}\".'.format(output_name, str(output_value)))\n \n if not component_spec.outputs and graph_output_value_map:\n component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]\n", "issue": "allow output artifact store configuration (vs hard coded)\nit seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). \r\n\r\nsee: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148\r\n\r\nit would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.\r\ni suggest making it configurable, i can do such PR if we agree its needed. \nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\n 'create_graph_component_from_pipeline_func',\n]\n\n\nimport inspect\nfrom collections import OrderedDict\nfrom typing import Callable\n\nfrom . 
import _components\nfrom ._structures import TaskSpec, ComponentSpec, OutputSpec, GraphInputReference, TaskOutputArgument, GraphImplementation, GraphSpec\nfrom ._naming import _make_name_unique_by_adding_index\nfrom ._python_op import _extract_component_interface\nfrom ._components import _create_task_factory_from_component_spec\n\n\ndef create_graph_component_from_pipeline_func(\n pipeline_func: Callable,\n output_component_file: str = None,\n embed_component_specs: bool = False,\n) -> Callable:\n '''Experimental! Creates graph component definition from a python pipeline function. The component file can be published for sharing.\n Pipeline function is a function that only calls component functions and passes outputs to inputs.\n This feature is experimental and lacks support for some of the DSL features like conditions and loops.\n Only pipelines consisting of loaded components or python components are currently supported (no manually created ContainerOps or ResourceOps).\n\n Args:\n pipeline_func: Python function to convert\n output_component_file: Path of the file where the component definition will be written. The `component.yaml` file can then be published for sharing.\n embed_component_specs: Whether to embed component definitions or just reference them. Embedding makes the graph component self-contained. Default is False.\n\n Returns:\n A function representing the graph component. The component spec can be accessed using the .component_spec attribute.\n The function will have the same parameters as the original function.\n When called, the function will return a task object, corresponding to the graph component.\n To reference the outputs of the task, use task.outputs[\"Output name\"].\n\n Example:\n\n producer_op = load_component_from_file('producer/component.yaml')\n processor_op = load_component_from_file('processor/component.yaml')\n\n def pipeline1(pipeline_param_1: int):\n producer_task = producer_op()\n processor_task = processor_op(pipeline_param_1, producer_task.outputs['Output 2'])\n\n return OrderedDict([\n ('Pipeline output 1', producer_task.outputs['Output 1']),\n ('Pipeline output 2', processor_task.outputs['Output 2']),\n ])\n \n create_graph_component_from_pipeline_func(pipeline1, output_component_file='pipeline.component.yaml')\n '''\n component_spec = create_graph_component_spec_from_pipeline_func(pipeline_func, embed_component_specs)\n if output_component_file:\n from pathlib import Path\n from ._yaml_utils import dump_yaml\n component_dict = component_spec.to_dict()\n component_yaml = dump_yaml(component_dict)\n Path(output_component_file).write_text(component_yaml)\n\n return _create_task_factory_from_component_spec(component_spec)\n\ndef create_graph_component_spec_from_pipeline_func(pipeline_func: Callable, embed_component_specs: bool = False) -> ComponentSpec:\n\n component_spec = _extract_component_interface(pipeline_func)\n # Checking the function parameters - they should not have file passing annotations.\n input_specs = component_spec.inputs or []\n for input in input_specs:\n if input._passing_style:\n raise TypeError('Graph component function parameter \"{}\" cannot have file-passing annotation \"{}\".'.format(input.name, input._passing_style))\n\n task_map = OrderedDict() #Preserving task order\n\n from ._components import _create_task_spec_from_component_and_arguments\n def task_construction_handler(\n component_spec,\n arguments,\n component_ref,\n ):\n task = _create_task_spec_from_component_and_arguments(\n component_spec=component_spec,\n 
arguments=arguments,\n component_ref=component_ref,\n )\n\n #Rewriting task ids so that they're same every time\n task_id = task.component_ref.spec.name or \"Task\"\n task_id = _make_name_unique_by_adding_index(task_id, task_map.keys(), ' ')\n for output_ref in task.outputs.values():\n output_ref.task_output.task_id = task_id\n output_ref.task_output.task = None\n task_map[task_id] = task\n # Remove the component spec from component reference unless it will make the reference empty or unless explicitly asked by the user\n if not embed_component_specs and any([task.component_ref.name, task.component_ref.url, task.component_ref.digest]):\n task.component_ref.spec = None\n\n return task #The handler is a transformation function, so it must pass the task through.\n\n # Preparing the pipeline_func arguments\n # TODO: The key should be original parameter name if different\n pipeline_func_args = {input.name: GraphInputReference(input_name=input.name).as_argument() for input in input_specs}\n\n try:\n #Setting the handler to fix and catch the tasks.\n # FIX: The handler only hooks container component creation\n old_handler = _components._container_task_constructor\n _components._container_task_constructor = task_construction_handler\n \n #Calling the pipeline_func with GraphInputArgument instances as arguments \n pipeline_func_result = pipeline_func(**pipeline_func_args)\n finally:\n _components._container_task_constructor = old_handler\n\n\n # Getting graph outputs\n output_names = [output.name for output in (component_spec.outputs or [])]\n\n if len(output_names) == 1 and output_names[0] == 'Output': # TODO: Check whether the NamedTuple syntax was used\n pipeline_func_result = [pipeline_func_result]\n\n if isinstance(pipeline_func_result, tuple) and hasattr(pipeline_func_result, '_asdict'): # collections.namedtuple and typing.NamedTuple\n pipeline_func_result = pipeline_func_result._asdict()\n\n if isinstance(pipeline_func_result, dict):\n if output_names:\n if set(output_names) != set(pipeline_func_result.keys()):\n raise ValueError('Returned outputs do not match outputs specified in the function signature: {} = {}'.format(str(set(pipeline_func_result.keys())), str(set(output_names))))\n\n if pipeline_func_result is None:\n graph_output_value_map = {}\n elif isinstance(pipeline_func_result, dict):\n graph_output_value_map = OrderedDict(pipeline_func_result)\n elif isinstance(pipeline_func_result, (list, tuple)):\n if output_names:\n if len(pipeline_func_result) != len(output_names):\n raise ValueError('Expected {} values from pipeline function, but got {}.'.format(len(output_names), len(pipeline_func_result)))\n graph_output_value_map = OrderedDict((name_value[0], name_value[1]) for name_value in zip(output_names, pipeline_func_result))\n else:\n graph_output_value_map = OrderedDict((output_value.task_output.output_name, output_value) for output_value in pipeline_func_result) # TODO: Fix possible name non-uniqueness (e.g. 
use task id as prefix or add index to non-unique names)\n else:\n raise TypeError('Pipeline must return outputs as tuple or OrderedDict.')\n\n #Checking the pipeline_func output object types\n for output_name, output_value in graph_output_value_map.items():\n if not isinstance(output_value, TaskOutputArgument):\n raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got \"{output_name}\" = \"{}\".'.format(output_name, str(output_value)))\n\n if not component_spec.outputs and graph_output_value_map:\n component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]\n\n component_spec.implementation = GraphImplementation(\n graph=GraphSpec(\n tasks=task_map,\n output_values=graph_output_value_map,\n )\n )\n return component_spec\n", "path": "sdk/python/kfp/components/_python_to_graph_component.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\n 'create_graph_component_from_pipeline_func',\n]\n\n\nimport inspect\nfrom collections import OrderedDict\nfrom typing import Callable\n\nfrom . import _components\nfrom ._structures import TaskSpec, ComponentSpec, OutputSpec, GraphInputReference, TaskOutputArgument, GraphImplementation, GraphSpec\nfrom ._naming import _make_name_unique_by_adding_index\nfrom ._python_op import _extract_component_interface\nfrom ._components import _create_task_factory_from_component_spec\n\n\ndef create_graph_component_from_pipeline_func(\n pipeline_func: Callable,\n output_component_file: str = None,\n embed_component_specs: bool = False,\n) -> Callable:\n '''Experimental! Creates graph component definition from a python pipeline function. The component file can be published for sharing.\n Pipeline function is a function that only calls component functions and passes outputs to inputs.\n This feature is experimental and lacks support for some of the DSL features like conditions and loops.\n Only pipelines consisting of loaded components or python components are currently supported (no manually created ContainerOps or ResourceOps).\n\n Args:\n pipeline_func: Python function to convert\n output_component_file: Path of the file where the component definition will be written. The `component.yaml` file can then be published for sharing.\n embed_component_specs: Whether to embed component definitions or just reference them. Embedding makes the graph component self-contained. Default is False.\n\n Returns:\n A function representing the graph component. 
The component spec can be accessed using the .component_spec attribute.\n The function will have the same parameters as the original function.\n When called, the function will return a task object, corresponding to the graph component.\n To reference the outputs of the task, use task.outputs[\"Output name\"].\n\n Example:\n\n producer_op = load_component_from_file('producer/component.yaml')\n processor_op = load_component_from_file('processor/component.yaml')\n\n def pipeline1(pipeline_param_1: int):\n producer_task = producer_op()\n processor_task = processor_op(pipeline_param_1, producer_task.outputs['Output 2'])\n\n return OrderedDict([\n ('Pipeline output 1', producer_task.outputs['Output 1']),\n ('Pipeline output 2', processor_task.outputs['Output 2']),\n ])\n \n create_graph_component_from_pipeline_func(pipeline1, output_component_file='pipeline.component.yaml')\n '''\n component_spec = create_graph_component_spec_from_pipeline_func(pipeline_func, embed_component_specs)\n if output_component_file:\n from pathlib import Path\n from ._yaml_utils import dump_yaml\n component_dict = component_spec.to_dict()\n component_yaml = dump_yaml(component_dict)\n Path(output_component_file).write_text(component_yaml)\n\n return _create_task_factory_from_component_spec(component_spec)\n\ndef create_graph_component_spec_from_pipeline_func(pipeline_func: Callable, embed_component_specs: bool = False) -> ComponentSpec:\n\n component_spec = _extract_component_interface(pipeline_func)\n # Checking the function parameters - they should not have file passing annotations.\n input_specs = component_spec.inputs or []\n for input in input_specs:\n if input._passing_style:\n raise TypeError('Graph component function parameter \"{}\" cannot have file-passing annotation \"{}\".'.format(input.name, input._passing_style))\n\n task_map = OrderedDict() #Preserving task order\n\n from ._components import _create_task_spec_from_component_and_arguments\n def task_construction_handler(\n component_spec,\n arguments,\n component_ref,\n ):\n task = _create_task_spec_from_component_and_arguments(\n component_spec=component_spec,\n arguments=arguments,\n component_ref=component_ref,\n )\n\n #Rewriting task ids so that they're same every time\n task_id = task.component_ref.spec.name or \"Task\"\n task_id = _make_name_unique_by_adding_index(task_id, task_map.keys(), ' ')\n for output_ref in task.outputs.values():\n output_ref.task_output.task_id = task_id\n output_ref.task_output.task = None\n task_map[task_id] = task\n # Remove the component spec from component reference unless it will make the reference empty or unless explicitly asked by the user\n if not embed_component_specs and any([task.component_ref.name, task.component_ref.url, task.component_ref.digest]):\n task.component_ref.spec = None\n\n return task #The handler is a transformation function, so it must pass the task through.\n\n # Preparing the pipeline_func arguments\n # TODO: The key should be original parameter name if different\n pipeline_func_args = {input.name: GraphInputReference(input_name=input.name).as_argument() for input in input_specs}\n\n try:\n #Setting the handler to fix and catch the tasks.\n # FIX: The handler only hooks container component creation\n old_handler = _components._container_task_constructor\n _components._container_task_constructor = task_construction_handler\n \n #Calling the pipeline_func with GraphInputArgument instances as arguments \n pipeline_func_result = pipeline_func(**pipeline_func_args)\n finally:\n 
_components._container_task_constructor = old_handler\n\n\n # Getting graph outputs\n output_names = [output.name for output in (component_spec.outputs or [])]\n\n if len(output_names) == 1 and output_names[0] == 'Output': # TODO: Check whether the NamedTuple syntax was used\n pipeline_func_result = [pipeline_func_result]\n\n if isinstance(pipeline_func_result, tuple) and hasattr(pipeline_func_result, '_asdict'): # collections.namedtuple and typing.NamedTuple\n pipeline_func_result = pipeline_func_result._asdict()\n\n if isinstance(pipeline_func_result, dict):\n if output_names:\n if set(output_names) != set(pipeline_func_result.keys()):\n raise ValueError('Returned outputs do not match outputs specified in the function signature: {} = {}'.format(str(set(pipeline_func_result.keys())), str(set(output_names))))\n\n if pipeline_func_result is None:\n graph_output_value_map = {}\n elif isinstance(pipeline_func_result, dict):\n graph_output_value_map = OrderedDict(pipeline_func_result)\n elif isinstance(pipeline_func_result, (list, tuple)):\n if output_names:\n if len(pipeline_func_result) != len(output_names):\n raise ValueError('Expected {} values from pipeline function, but got {}.'.format(len(output_names), len(pipeline_func_result)))\n graph_output_value_map = OrderedDict((name_value[0], name_value[1]) for name_value in zip(output_names, pipeline_func_result))\n else:\n graph_output_value_map = OrderedDict((output_value.task_output.output_name, output_value) for output_value in pipeline_func_result) # TODO: Fix possible name non-uniqueness (e.g. use task id as prefix or add index to non-unique names)\n else:\n raise TypeError('Pipeline must return outputs as tuple or OrderedDict.')\n\n #Checking the pipeline_func output object types\n for output_name, output_value in graph_output_value_map.items():\n if not isinstance(output_value, TaskOutputArgument):\n raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got \"{}\" = \"{}\".'.format(output_name, str(output_value)))\n\n if not component_spec.outputs and graph_output_value_map:\n component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]\n\n component_spec.implementation = GraphImplementation(\n graph=GraphSpec(\n tasks=task_map,\n output_values=graph_output_value_map,\n )\n )\n return component_spec\n", "path": "sdk/python/kfp/components/_python_to_graph_component.py"}]} | 2,758 | 218 |
gh_patches_debug_13918 | rasdani/github-patches | git_diff | falconry__falcon-61 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename Request.body to Request.stream
Mirros Response. And it really is a stream.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/request.py`
Content:
```
1 """Defines the Request class.
2
3 Copyright 2013 by Rackspace Hosting, Inc.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 """
18
19 from datetime import datetime
20
21 import six
22
23 from falcon.request_helpers import *
24 from falcon.exceptions import *
25
26 DEFAULT_ERROR_LOG_FORMAT = ('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR]'
27 ' {1} {2}?{3} => {4}\n')
28
29
30 class Request(object):
31 """Represents a client's HTTP request"""
32
33 __slots__ = (
34 'app',
35 'body',
36 '_headers',
37 'method',
38 '_params',
39 'path',
40 'protocol',
41 'query_string',
42 '_wsgierrors'
43 )
44
45 def __init__(self, env):
46 """Initialize attributes based on a WSGI environment dict
47
48 Note: Request is not meant to be instantiated directory by responders.
49
50 Args:
51 env: A WSGI environment dict passed in from the server. See also
52 the PEP-333 spec.
53
54 """
55
56 self._wsgierrors = env['wsgi.errors']
57 self.body = env['wsgi.input']
58
59 self.protocol = env['wsgi.url_scheme']
60 self.app = env['SCRIPT_NAME']
61 self.method = env['REQUEST_METHOD']
62 self.path = env['PATH_INFO'] or '/'
63 self.query_string = query_string = env['QUERY_STRING']
64
65 self._params = parse_query_string(query_string)
66 self._headers = parse_headers(env)
67
68 def log_error(self, message):
69 """Log an error to wsgi.error
70
71 Prepends timestamp and request info to message, and writes the
72 result out to the WSGI server's error stream (wsgi.error).
73
74 Args:
75 message: A string describing the problem. If a byte-string and
76 running under Python 2, the string is assumed to be encoded
77 as UTF-8.
78
79 """
80 if not six.PY3 and isinstance(message, unicode):
81 message = message.encode('utf-8')
82
83 log_line = (
84 DEFAULT_ERROR_LOG_FORMAT.
85 format(datetime.now(), self.method, self.path,
86 self.query_string, message)
87 )
88
89 self._wsgierrors.write(log_line)
90
91 def client_accepts_json(self):
92 """Return True if the Accept header indicates JSON support"""
93
94 accept = self.get_header('Accept')
95 if accept is not None:
96 return ('application/json' in accept) or ('*/*' in accept)
97
98 return False
99
100 def get_header(self, name, default=None, required=False):
101 """Return a header value as a string
102
103 Args:
104 name: Header name, case-insensitive (e.g., 'Content-Type')
105 default: Value to return in case the header is not
106 found (default None)
107 required: Set to True to raise HttpBadRequest instead
108 of returning gracefully when the header is not found
109 (default False)
110
111 """
112
113 # Use try..except to optimize for the header existing in most cases
114 try:
115 # Don't take the time to cache beforehand, using HTTP naming.
116 # This will be faster, assuming that most headers are looked
117 # up only once, and not all headers will be requested.
118 return self._headers[name.upper().replace('-', '_')]
119 except KeyError:
120 if not required:
121 return default
122
123 raise HTTPBadRequest('Missing header',
124 'The "' + name + '" header is required.')
125
126 def get_param(self, name, default=None, required=False):
127 """Return the value of a query string parameter as a string
128
129 Args:
130 name: Parameter name, case-sensitive (e.g., 'sort')
131 default: Value to return in case the parameter is not found in the
132 query string (default None)
133 required: Set to True to raise HTTPBadRequest instead of returning
134 gracefully when the parameter is not found (default False)
135
136 Returns:
137 The value of the param as a byte string, or the default value if
138 param is not found and is not required.
139
140 Raises
141 HTTPBadRequest: The param was not found in the request, but was
142 required.
143
144 """
145
146 # PERF: Use if..in since it is a good all-around performer; we don't
147 # know how likely params are to be specified by clients.
148 if name in self._params:
149 return self._params[name]
150
151 if not required:
152 return default
153
154 raise HTTPBadRequest('Missing query parameter',
155 'The "' + name + '" query parameter is required.')
156
157 def get_param_as_int(self, name, default=None, required=False):
158 """Return the value of a query string parameter as an int
159
160 Args:
161 name: Parameter name, case-sensitive (e.g., 'limit')
162 default: Value to return in case the parameter is not found in the
163 query string, or it is not an integer (default None)
164 required: Set to True to raise HTTPBadRequest instead of returning
165 gracefully when the parameter is not found or is not an
166 integer (default False)
167
168 Returns:
169 The value of the param if it is found and can be converted to an
170 integer. Otherwise, returns the default value unless required is
171 True.
172
173 Raises
174 HTTPBadRequest: The param was not found in the request, but was
175 required.
176
177 """
178
179 # PERF: Use if..in since it is a good all-around performer; we don't
180 # know how likely params are to be specified by clients.
181 if name in self._params:
182 val = self._params[name]
183 try:
184 return int(val)
185 except ValueError:
186 pass
187
188 if not required:
189 return default
190
191 raise HTTPBadRequest('Missing query parameter',
192 'The "' + name + '" query parameter is required.')
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/request.py b/falcon/request.py
--- a/falcon/request.py
+++ b/falcon/request.py
@@ -32,13 +32,13 @@
__slots__ = (
'app',
- 'body',
'_headers',
'method',
'_params',
'path',
'protocol',
'query_string',
+ 'stream',
'_wsgierrors'
)
@@ -54,7 +54,7 @@
"""
self._wsgierrors = env['wsgi.errors']
- self.body = env['wsgi.input']
+ self.stream = env['wsgi.input']
self.protocol = env['wsgi.url_scheme']
self.app = env['SCRIPT_NAME']
| {"golden_diff": "diff --git a/falcon/request.py b/falcon/request.py\n--- a/falcon/request.py\n+++ b/falcon/request.py\n@@ -32,13 +32,13 @@\n \n __slots__ = (\n 'app',\n- 'body',\n '_headers',\n 'method',\n '_params',\n 'path',\n 'protocol',\n 'query_string',\n+ 'stream',\n '_wsgierrors'\n )\n \n@@ -54,7 +54,7 @@\n \"\"\"\n \n self._wsgierrors = env['wsgi.errors']\n- self.body = env['wsgi.input']\n+ self.stream = env['wsgi.input']\n \n self.protocol = env['wsgi.url_scheme']\n self.app = env['SCRIPT_NAME']\n", "issue": "Rename Request.body to Request.stream\nMirros Response. And it really is a stream.\n\n", "before_files": [{"content": "\"\"\"Defines the Request class.\n\nCopyright 2013 by Rackspace Hosting, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nfrom datetime import datetime\n\nimport six\n\nfrom falcon.request_helpers import *\nfrom falcon.exceptions import *\n\nDEFAULT_ERROR_LOG_FORMAT = ('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR]'\n ' {1} {2}?{3} => {4}\\n')\n\n\nclass Request(object):\n \"\"\"Represents a client's HTTP request\"\"\"\n\n __slots__ = (\n 'app',\n 'body',\n '_headers',\n 'method',\n '_params',\n 'path',\n 'protocol',\n 'query_string',\n '_wsgierrors'\n )\n\n def __init__(self, env):\n \"\"\"Initialize attributes based on a WSGI environment dict\n\n Note: Request is not meant to be instantiated directory by responders.\n\n Args:\n env: A WSGI environment dict passed in from the server. See also\n the PEP-333 spec.\n\n \"\"\"\n\n self._wsgierrors = env['wsgi.errors']\n self.body = env['wsgi.input']\n\n self.protocol = env['wsgi.url_scheme']\n self.app = env['SCRIPT_NAME']\n self.method = env['REQUEST_METHOD']\n self.path = env['PATH_INFO'] or '/'\n self.query_string = query_string = env['QUERY_STRING']\n\n self._params = parse_query_string(query_string)\n self._headers = parse_headers(env)\n\n def log_error(self, message):\n \"\"\"Log an error to wsgi.error\n\n Prepends timestamp and request info to message, and writes the\n result out to the WSGI server's error stream (wsgi.error).\n\n Args:\n message: A string describing the problem. 
If a byte-string and\n running under Python 2, the string is assumed to be encoded\n as UTF-8.\n\n \"\"\"\n if not six.PY3 and isinstance(message, unicode):\n message = message.encode('utf-8')\n\n log_line = (\n DEFAULT_ERROR_LOG_FORMAT.\n format(datetime.now(), self.method, self.path,\n self.query_string, message)\n )\n\n self._wsgierrors.write(log_line)\n\n def client_accepts_json(self):\n \"\"\"Return True if the Accept header indicates JSON support\"\"\"\n\n accept = self.get_header('Accept')\n if accept is not None:\n return ('application/json' in accept) or ('*/*' in accept)\n\n return False\n\n def get_header(self, name, default=None, required=False):\n \"\"\"Return a header value as a string\n\n Args:\n name: Header name, case-insensitive (e.g., 'Content-Type')\n default: Value to return in case the header is not\n found (default None)\n required: Set to True to raise HttpBadRequest instead\n of returning gracefully when the header is not found\n (default False)\n\n \"\"\"\n\n # Use try..except to optimize for the header existing in most cases\n try:\n # Don't take the time to cache beforehand, using HTTP naming.\n # This will be faster, assuming that most headers are looked\n # up only once, and not all headers will be requested.\n return self._headers[name.upper().replace('-', '_')]\n except KeyError:\n if not required:\n return default\n\n raise HTTPBadRequest('Missing header',\n 'The \"' + name + '\" header is required.')\n\n def get_param(self, name, default=None, required=False):\n \"\"\"Return the value of a query string parameter as a string\n\n Args:\n name: Parameter name, case-sensitive (e.g., 'sort')\n default: Value to return in case the parameter is not found in the\n query string (default None)\n required: Set to True to raise HTTPBadRequest instead of returning\n gracefully when the parameter is not found (default False)\n\n Returns:\n The value of the param as a byte string, or the default value if\n param is not found and is not required.\n\n Raises\n HTTPBadRequest: The param was not found in the request, but was\n required.\n\n \"\"\"\n\n # PERF: Use if..in since it is a good all-around performer; we don't\n # know how likely params are to be specified by clients.\n if name in self._params:\n return self._params[name]\n\n if not required:\n return default\n\n raise HTTPBadRequest('Missing query parameter',\n 'The \"' + name + '\" query parameter is required.')\n\n def get_param_as_int(self, name, default=None, required=False):\n \"\"\"Return the value of a query string parameter as an int\n\n Args:\n name: Parameter name, case-sensitive (e.g., 'limit')\n default: Value to return in case the parameter is not found in the\n query string, or it is not an integer (default None)\n required: Set to True to raise HTTPBadRequest instead of returning\n gracefully when the parameter is not found or is not an\n integer (default False)\n\n Returns:\n The value of the param if it is found and can be converted to an\n integer. 
Otherwise, returns the default value unless required is\n True.\n\n Raises\n HTTPBadRequest: The param was not found in the request, but was\n required.\n\n \"\"\"\n\n # PERF: Use if..in since it is a good all-around performer; we don't\n # know how likely params are to be specified by clients.\n if name in self._params:\n val = self._params[name]\n try:\n return int(val)\n except ValueError:\n pass\n\n if not required:\n return default\n\n raise HTTPBadRequest('Missing query parameter',\n 'The \"' + name + '\" query parameter is required.')\n", "path": "falcon/request.py"}], "after_files": [{"content": "\"\"\"Defines the Request class.\n\nCopyright 2013 by Rackspace Hosting, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nfrom datetime import datetime\n\nimport six\n\nfrom falcon.request_helpers import *\nfrom falcon.exceptions import *\n\nDEFAULT_ERROR_LOG_FORMAT = ('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR]'\n ' {1} {2}?{3} => {4}\\n')\n\n\nclass Request(object):\n \"\"\"Represents a client's HTTP request\"\"\"\n\n __slots__ = (\n 'app',\n '_headers',\n 'method',\n '_params',\n 'path',\n 'protocol',\n 'query_string',\n 'stream',\n '_wsgierrors'\n )\n\n def __init__(self, env):\n \"\"\"Initialize attributes based on a WSGI environment dict\n\n Note: Request is not meant to be instantiated directory by responders.\n\n Args:\n env: A WSGI environment dict passed in from the server. See also\n the PEP-333 spec.\n\n \"\"\"\n\n self._wsgierrors = env['wsgi.errors']\n self.stream = env['wsgi.input']\n\n self.protocol = env['wsgi.url_scheme']\n self.app = env['SCRIPT_NAME']\n self.method = env['REQUEST_METHOD']\n self.path = env['PATH_INFO'] or '/'\n self.query_string = query_string = env['QUERY_STRING']\n\n self._params = parse_query_string(query_string)\n self._headers = parse_headers(env)\n\n def log_error(self, message):\n \"\"\"Log an error to wsgi.error\n\n Prepends timestamp and request info to message, and writes the\n result out to the WSGI server's error stream (wsgi.error).\n\n Args:\n message: A string describing the problem. 
If a byte-string and\n running under Python 2, the string is assumed to be encoded\n as UTF-8.\n\n \"\"\"\n if not six.PY3 and isinstance(message, unicode):\n message = message.encode('utf-8')\n\n log_line = (\n DEFAULT_ERROR_LOG_FORMAT.\n format(datetime.now(), self.method, self.path,\n self.query_string, message)\n )\n\n self._wsgierrors.write(log_line)\n\n def client_accepts_json(self):\n \"\"\"Return True if the Accept header indicates JSON support\"\"\"\n\n accept = self.get_header('Accept')\n if accept is not None:\n return ('application/json' in accept) or ('*/*' in accept)\n\n return False\n\n def get_header(self, name, default=None, required=False):\n \"\"\"Return a header value as a string\n\n Args:\n name: Header name, case-insensitive (e.g., 'Content-Type')\n default: Value to return in case the header is not\n found (default None)\n required: Set to True to raise HttpBadRequest instead\n of returning gracefully when the header is not found\n (default False)\n\n \"\"\"\n\n # Use try..except to optimize for the header existing in most cases\n try:\n # Don't take the time to cache beforehand, using HTTP naming.\n # This will be faster, assuming that most headers are looked\n # up only once, and not all headers will be requested.\n return self._headers[name.upper().replace('-', '_')]\n except KeyError:\n if not required:\n return default\n\n raise HTTPBadRequest('Missing header',\n 'The \"' + name + '\" header is required.')\n\n def get_param(self, name, default=None, required=False):\n \"\"\"Return the value of a query string parameter as a string\n\n Args:\n name: Parameter name, case-sensitive (e.g., 'sort')\n default: Value to return in case the parameter is not found in the\n query string (default None)\n required: Set to True to raise HTTPBadRequest instead of returning\n gracefully when the parameter is not found (default False)\n\n Returns:\n The value of the param as a byte string, or the default value if\n param is not found and is not required.\n\n Raises\n HTTPBadRequest: The param was not found in the request, but was\n required.\n\n \"\"\"\n\n # PERF: Use if..in since it is a good all-around performer; we don't\n # know how likely params are to be specified by clients.\n if name in self._params:\n return self._params[name]\n\n if not required:\n return default\n\n raise HTTPBadRequest('Missing query parameter',\n 'The \"' + name + '\" query parameter is required.')\n\n def get_param_as_int(self, name, default=None, required=False):\n \"\"\"Return the value of a query string parameter as an int\n\n Args:\n name: Parameter name, case-sensitive (e.g., 'limit')\n default: Value to return in case the parameter is not found in the\n query string, or it is not an integer (default None)\n required: Set to True to raise HTTPBadRequest instead of returning\n gracefully when the parameter is not found or is not an\n integer (default False)\n\n Returns:\n The value of the param if it is found and can be converted to an\n integer. 
Otherwise, returns the default value unless required is\n True.\n\n Raises\n HTTPBadRequest: The param was not found in the request, but was\n required.\n\n \"\"\"\n\n # PERF: Use if..in since it is a good all-around performer; we don't\n # know how likely params are to be specified by clients.\n if name in self._params:\n val = self._params[name]\n try:\n return int(val)\n except ValueError:\n pass\n\n if not required:\n return default\n\n raise HTTPBadRequest('Missing query parameter',\n 'The \"' + name + '\" query parameter is required.')\n", "path": "falcon/request.py"}]} | 2,127 | 168 |
gh_patches_debug_39573 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-674 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Jinja 3.x
Jinja 3.x was released last week and it has broken some functionality within the agent. The following tests are currently failing:
```
tests/integration/instruments/test_jinja2_py36plus.py::test_async_render
tests/integration/instruments/test_jinja2_py36plus.py::test_async_render_name
tests/integration/instruments/test_urllib3.py::test_request - pytest.PytestUnraisableExceptionWarning
tests/integration/instruments/test_urllib3.py::test_request_no_absolute_url
```
--- END ISSUE ---
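For background on these failures: Jinja2 3.0 made async rendering part of the core package, so `Template.render_async` exists as soon as `jinja2` is imported and the separate `jinja2.asyncsupport` module no longer exists. A quick, hedged way to check which behaviour a given environment exhibits (nothing here is specific to scout_apm):
```python
from importlib.metadata import version

from jinja2 import Template

print(version("jinja2"))

# True at import time on Jinja2 3.x; on 2.x it only becomes True after an
# Environment is created with async support enabled, which imports
# jinja2.asyncsupport and patches Template.
print(hasattr(Template, "render_async"))
```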
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/instruments/jinja2.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5 import sys
6
7 import wrapt
8
9 from scout_apm.core.tracked_request import TrackedRequest
10
11 try:
12 from jinja2 import Environment
13 except ImportError: # pragma: no cover
14 Environment = None
15
16 try:
17 from jinja2 import Template
18 except ImportError: # pragma: no cover
19 Template = None
20
21 # The async_ module can only be shipped on Python 3.6+
22 try:
23 from scout_apm.async_.instruments.jinja2 import wrapped_render_async
24 except ImportError:
25 wrapped_render_async = None
26
27
28 logger = logging.getLogger(__name__)
29
30
31 have_patched_environment_init = False
32 have_patched_template_render = False
33 have_patched_template_render_async = False
34
35
36 def ensure_installed():
37 global have_patched_environment_init
38 global have_patched_template_render
39
40 logger.debug("Instrumenting Jinja2.")
41
42 if Template is None:
43 logger.debug("Couldn't import jinja2.Template - probably not installed.")
44 return
45
46 if not have_patched_environment_init:
47 try:
48 Environment.__init__ = wrapped_environment_init(Environment.__init__)
49 except Exception as exc:
50 logger.warning(
51 "Failed to instrument jinja2.Environment.__init__: %r",
52 exc,
53 exc_info=exc,
54 )
55 else:
56 have_patched_environment_init = True
57
58 if not have_patched_template_render:
59 try:
60 Template.render = wrapped_render(Template.render)
61 except Exception as exc:
62 logger.warning(
63 "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc
64 )
65 else:
66 have_patched_template_render = True
67
68
69 @wrapt.decorator
70 def wrapped_render(wrapped, instance, args, kwargs):
71 tracked_request = TrackedRequest.instance()
72 with tracked_request.span(operation="Template/Render") as span:
73 span.tag("name", instance.name)
74 return wrapped(*args, **kwargs)
75
76
77 @wrapt.decorator
78 def wrapped_environment_init(wrapped, instance, args, kwargs):
79 """
80 Delayed wrapping of render_async(), since Template won't have this method
81 until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is
82 done conditionally in Environment.__init__:
83 https://github.com/pallets/jinja/issues/765
84 """
85 global have_patched_template_render_async
86 result = wrapped(*args, **kwargs)
87
88 if (
89 wrapped_render_async is not None
90 and not have_patched_template_render_async
91 and "jinja2.asyncsupport" in sys.modules
92 ):
93 try:
94 Template.render_async = wrapped_render_async(Template.render_async)
95 except Exception as exc:
96 logger.warning(
97 "Failed to instrument jinja2.Template.render_async: %r",
98 exc,
99 exc_info=exc,
100 )
101 else:
102 have_patched_template_render_async = True
103
104 return result
105
```
--- END FILES ---
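Given the module above, the breakage follows from the `"jinja2.asyncsupport" in sys.modules` condition in `wrapped_environment_init` never being true on Jinja2 3.x, so `render_async` is never wrapped. The sketch below is one version-agnostic way to decide when to wrap `render_async`; it is a simplified illustration (no logging, a plain flag to avoid double wrapping) rather than the exact fix shipped by the project.
```python
import sys

from jinja2 import Environment, Template


def patch_render_async(wrap):
    """Wrap Template.render_async on both Jinja2 2.x and 3.x."""
    state = {"patched": False}

    def do_patch():
        if not state["patched"] and hasattr(Template, "render_async"):
            Template.render_async = wrap(Template.render_async)
            state["patched"] = True

    if hasattr(Template, "render_async"):
        # Jinja2 >= 3.0: async support is built in, wrap immediately.
        do_patch()
        return

    # Jinja2 2.x: render_async only appears after Environment.__init__ has
    # imported jinja2.asyncsupport, so defer the wrapping until then.
    original_init = Environment.__init__

    def init_then_patch(self, *args, **kwargs):
        original_init(self, *args, **kwargs)
        if "jinja2.asyncsupport" in sys.modules:
            do_patch()

    Environment.__init__ = init_then_patch
```
A caller would pass the same kind of wrapper as `wrapped_render_async` above, for example `patch_render_async(wrapped_render_async)`.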
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py
--- a/src/scout_apm/instruments/jinja2.py
+++ b/src/scout_apm/instruments/jinja2.py
@@ -34,7 +34,6 @@
def ensure_installed():
- global have_patched_environment_init
global have_patched_template_render
logger.debug("Instrumenting Jinja2.")
@@ -43,9 +42,31 @@
logger.debug("Couldn't import jinja2.Template - probably not installed.")
return
- if not have_patched_environment_init:
+ instrument_render_async()
+
+ if not have_patched_template_render:
+ try:
+ Template.render = wrapped_render(Template.render)
+ except Exception as exc:
+ logger.warning(
+ "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc
+ )
+ else:
+ have_patched_template_render = True
+
+
+def instrument_render_async():
+ global have_patched_environment_init
+ global have_patched_template_render_async
+
+ if wrapped_render_async is None:
+ return
+
+ if not have_patched_environment_init and not hasattr(Template, "render_async"):
try:
- Environment.__init__ = wrapped_environment_init(Environment.__init__)
+ Environment.__init__ = wrapped_environment_init_jinja_v2(
+ Environment.__init__
+ )
except Exception as exc:
logger.warning(
"Failed to instrument jinja2.Environment.__init__: %r",
@@ -54,16 +75,17 @@
)
else:
have_patched_environment_init = True
-
- if not have_patched_template_render:
+ elif hasattr(Template, "render_async") and not have_patched_template_render_async:
try:
- Template.render = wrapped_render(Template.render)
+ Template.render_async = wrapped_render_async(Template.render_async)
except Exception as exc:
logger.warning(
- "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc
+ "Failed to instrument jinja2.Template.render_async: %r",
+ exc,
+ exc_info=exc,
)
else:
- have_patched_template_render = True
+ have_patched_template_render_async = True
@wrapt.decorator
@@ -75,12 +97,14 @@
@wrapt.decorator
-def wrapped_environment_init(wrapped, instance, args, kwargs):
+def wrapped_environment_init_jinja_v2(wrapped, instance, args, kwargs):
"""
Delayed wrapping of render_async(), since Template won't have this method
until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is
done conditionally in Environment.__init__:
https://github.com/pallets/jinja/issues/765
+
+ This is no longer needed since Jinja2 v3.0.0
"""
global have_patched_template_render_async
result = wrapped(*args, **kwargs)
| {"golden_diff": "diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py\n--- a/src/scout_apm/instruments/jinja2.py\n+++ b/src/scout_apm/instruments/jinja2.py\n@@ -34,7 +34,6 @@\n \n \n def ensure_installed():\n- global have_patched_environment_init\n global have_patched_template_render\n \n logger.debug(\"Instrumenting Jinja2.\")\n@@ -43,9 +42,31 @@\n logger.debug(\"Couldn't import jinja2.Template - probably not installed.\")\n return\n \n- if not have_patched_environment_init:\n+ instrument_render_async()\n+\n+ if not have_patched_template_render:\n+ try:\n+ Template.render = wrapped_render(Template.render)\n+ except Exception as exc:\n+ logger.warning(\n+ \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ )\n+ else:\n+ have_patched_template_render = True\n+\n+\n+def instrument_render_async():\n+ global have_patched_environment_init\n+ global have_patched_template_render_async\n+\n+ if wrapped_render_async is None:\n+ return\n+\n+ if not have_patched_environment_init and not hasattr(Template, \"render_async\"):\n try:\n- Environment.__init__ = wrapped_environment_init(Environment.__init__)\n+ Environment.__init__ = wrapped_environment_init_jinja_v2(\n+ Environment.__init__\n+ )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Environment.__init__: %r\",\n@@ -54,16 +75,17 @@\n )\n else:\n have_patched_environment_init = True\n-\n- if not have_patched_template_render:\n+ elif hasattr(Template, \"render_async\") and not have_patched_template_render_async:\n try:\n- Template.render = wrapped_render(Template.render)\n+ Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n- \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ \"Failed to instrument jinja2.Template.render_async: %r\",\n+ exc,\n+ exc_info=exc,\n )\n else:\n- have_patched_template_render = True\n+ have_patched_template_render_async = True\n \n \n @wrapt.decorator\n@@ -75,12 +97,14 @@\n \n \n @wrapt.decorator\n-def wrapped_environment_init(wrapped, instance, args, kwargs):\n+def wrapped_environment_init_jinja_v2(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n+\n+ This is no longer needed since Jinja2 v3.0.0\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n", "issue": "Support Jinja 3.x\nJinja 3.x was released last week and it has broken some functionality within the agent. 
The following tests are currently failing:\r\n\r\n```\r\ntests/integration/instruments/test_jinja2_py36plus.py::test_async_render\r\ntests/integration/instruments/test_jinja2_py36plus.py::test_async_render_name\r\ntests/integration/instruments/test_urllib3.py::test_request - pytest.PytestUnraisableExceptionWarning\r\ntests/integration/instruments/test_urllib3.py::test_request_no_absolute_url\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Environment\nexcept ImportError: # pragma: no cover\n Environment = None\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_environment_init = False\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_environment_init\n global have_patched_template_render\n\n logger.debug(\"Instrumenting Jinja2.\")\n\n if Template is None:\n logger.debug(\"Couldn't import jinja2.Template - probably not installed.\")\n return\n\n if not have_patched_environment_init:\n try:\n Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Environment.__init__: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_environment_init = True\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=\"Template/Render\") as span:\n span.tag(\"name\", instance.name)\n return wrapped(*args, **kwargs)\n\n\[email protected]\ndef wrapped_environment_init(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n\n if (\n wrapped_render_async is not None\n and not have_patched_template_render_async\n and \"jinja2.asyncsupport\" in sys.modules\n ):\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n return result\n", "path": "src/scout_apm/instruments/jinja2.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Environment\nexcept ImportError: # pragma: no cover\n Environment = None\n\ntry:\n from jinja2 import 
Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_environment_init = False\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_template_render\n\n logger.debug(\"Instrumenting Jinja2.\")\n\n if Template is None:\n logger.debug(\"Couldn't import jinja2.Template - probably not installed.\")\n return\n\n instrument_render_async()\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n\ndef instrument_render_async():\n global have_patched_environment_init\n global have_patched_template_render_async\n\n if wrapped_render_async is None:\n return\n\n if not have_patched_environment_init and not hasattr(Template, \"render_async\"):\n try:\n Environment.__init__ = wrapped_environment_init_jinja_v2(\n Environment.__init__\n )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Environment.__init__: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_environment_init = True\n elif hasattr(Template, \"render_async\") and not have_patched_template_render_async:\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=\"Template/Render\") as span:\n span.tag(\"name\", instance.name)\n return wrapped(*args, **kwargs)\n\n\[email protected]\ndef wrapped_environment_init_jinja_v2(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n\n This is no longer needed since Jinja2 v3.0.0\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n\n if (\n wrapped_render_async is not None\n and not have_patched_template_render_async\n and \"jinja2.asyncsupport\" in sys.modules\n ):\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n return result\n", "path": "src/scout_apm/instruments/jinja2.py"}]} | 1,252 | 692 |
gh_patches_debug_12015 | rasdani/github-patches | git_diff | iterative__dvc-8505 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`exp run`: data gets re-imported on every call
# Bug Report
## Description
When a pipeline uses an imported data file (with `dvc import`), the data gets cloned(?) and hashed every time `dvc exp run` is called.
### Reproduce
1. dvc import [email protected]:iterative/dataset-registry.git use-cases/cats-dogs
2. dvc stage add -n foo -d cats-dogs echo foo
3. dvc exp run
### Expected
When using `dvc repro` the imported data doesn't get re-hashed. I would expect `dvc exp run` to behave the same.
### Environment information
**Output of `dvc doctor`:**
```console
$ dvc doctor
DVC version: 2.6.3 (pip)
---------------------------------
Platform: Python 3.9.6 on macOS-10.16-x86_64-i386-64bit
Supports:
gdrive (pydrive2 = 1.9.1),
http (requests = 2.26.0),
https (requests = 2.26.0)
Cache types: reflink, hardlink, symlink
Cache directory: apfs on /dev/disk1s1s1
Caches: local
Remotes: None
Workspace directory: apfs on /dev/disk1s1s1
Repo: dvc, git
```
**Additional Information (if any):**
```console
$ dvc repro -v
2021-08-25 11:11:55,186 DEBUG: Computed stage: 'cats-dogs.dvc' md5: '5a135b297ee3c96465ce4b320f44fb8b'
'cats-dogs.dvc' didn't change, skipping
Stage 'foo' didn't change, skipping
Data and pipelines are up to date.
```
```console
$ dvc exp run -v
2021-08-25 11:12:15,672 DEBUG: Detaching HEAD at 'HEAD'
2021-08-25 11:12:15,690 DEBUG: Stashing workspace
2021-08-25 11:12:15,700 DEBUG: No changes to stash
2021-08-25 11:12:15,749 DEBUG: Creating external repo [email protected]:iterative/dataset-registry.git@ca140591a21c6d75a7057d1e2eb3f51d3115c5f5
2021-08-25 11:12:15,749 DEBUG: erepo: git clone '[email protected]:iterative/dataset-registry.git' to a temporary dir
Computing file/dir hashes (only done once)
. . .
```
--- END ISSUE ---
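Before looking at the code below, note where the repeated hashing most plausibly comes from: committing stages during the experiment run diffs every stage's deps and outs, and for an import stage that diff means re-computing the file/dir hashes of the imported data. The sketch below is a hypothetical simplification of that commit path in which a forced commit skips the check entirely; the names echo `dvc/repo/commit.py` (shown below), but this is an illustration, not the exact upstream change.
```python
def commit_stage(stage, force=False, allow_missing=False):
    """Hypothetical control flow: only diff (and therefore re-hash) the
    stage's deps and outs when the commit is not forced."""
    if force:
        # Trust the caller and skip the expensive changed-entries check,
        # which is what re-hashes imported data on every run.
        stage.save(allow_missing=allow_missing)
    else:
        changes = stage.changed_entries()
        if any(changes):
            # DVC proper prompts the user here; simplified to a hard error.
            raise RuntimeError(
                "unable to commit changed {}; use force=True".format(stage)
            )
        stage.save(allow_missing=allow_missing)
    stage.commit(allow_missing=allow_missing)
```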
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/commit.py`
Content:
```
1 from dvc import prompt
2
3 from . import locked
4
5
6 def _prepare_message(stage, changes):
7 changed_deps, changed_outs, changed_stage = changes
8 if changed_deps and changed_outs:
9 msg = "dependencies {deps} and outputs {outs} of {stage} changed."
10 elif changed_deps:
11 msg = "dependencies {deps} of {stage} changed."
12 elif changed_outs:
13 msg = "outputs {outs} of {stage} changed."
14 else:
15 msg = "{stage_changed}"
16
17 msg += " Are you sure you want to commit it?"
18
19 kw = {
20 "stage": stage,
21 "deps": changed_deps,
22 "outs": changed_outs,
23 "stage_changed": changed_stage,
24 }
25 return msg.format_map(kw)
26
27
28 def prompt_to_commit(stage, changes, force=False):
29 from dvc.stage.exceptions import StageCommitError
30
31 if not (force or prompt.confirm(_prepare_message(stage, changes))):
32 raise StageCommitError(
33 "unable to commit changed {}. Use `-f|--force` to "
34 "force.".format(stage)
35 )
36
37
38 @locked
39 def commit(
40 self,
41 target,
42 with_deps=False,
43 recursive=False,
44 force=False,
45 allow_missing=False,
46 data_only=False,
47 ):
48 from dvc.dvcfile import Dvcfile
49
50 stages_info = [
51 info
52 for info in self.stage.collect_granular(
53 target, with_deps=with_deps, recursive=recursive
54 )
55 if not data_only or info.stage.is_data_source
56 ]
57 for stage_info in stages_info:
58 stage = stage_info.stage
59 changes = stage.changed_entries()
60 if any(changes):
61 prompt_to_commit(stage, changes, force=force)
62 stage.save(allow_missing=allow_missing)
63 stage.commit(
64 filter_info=stage_info.filter_info, allow_missing=allow_missing
65 )
66
67 Dvcfile(self, stage.path).dump(stage, update_pipeline=False)
68 return [s.stage for s in stages_info]
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/repo/commit.py b/dvc/repo/commit.py
--- a/dvc/repo/commit.py
+++ b/dvc/repo/commit.py
@@ -56,10 +56,13 @@
]
for stage_info in stages_info:
stage = stage_info.stage
- changes = stage.changed_entries()
- if any(changes):
- prompt_to_commit(stage, changes, force=force)
+ if force:
stage.save(allow_missing=allow_missing)
+ else:
+ changes = stage.changed_entries()
+ if any(changes):
+ prompt_to_commit(stage, changes, force=force)
+ stage.save(allow_missing=allow_missing)
stage.commit(
filter_info=stage_info.filter_info, allow_missing=allow_missing
)
| {"golden_diff": "diff --git a/dvc/repo/commit.py b/dvc/repo/commit.py\n--- a/dvc/repo/commit.py\n+++ b/dvc/repo/commit.py\n@@ -56,10 +56,13 @@\n ]\n for stage_info in stages_info:\n stage = stage_info.stage\n- changes = stage.changed_entries()\n- if any(changes):\n- prompt_to_commit(stage, changes, force=force)\n+ if force:\n stage.save(allow_missing=allow_missing)\n+ else:\n+ changes = stage.changed_entries()\n+ if any(changes):\n+ prompt_to_commit(stage, changes, force=force)\n+ stage.save(allow_missing=allow_missing)\n stage.commit(\n filter_info=stage_info.filter_info, allow_missing=allow_missing\n )\n", "issue": "`exp run`: data gets re-imported on every call\n# Bug Report\r\n\r\n## Description\r\n\r\nWhen a pipeline uses an imported data file (with `dvc import`), the data gets cloned(?) and hashed every time `dvc exp run` is called.\r\n\r\n### Reproduce\r\n\r\n1. dvc import [email protected]:iterative/dataset-registry.git use-cases/cats-dogs\r\n2. dvc stage add -n foo -d cats-dogs echo foo\r\n3. dvc exp run\r\n\r\n### Expected\r\n\r\nWhen using `dvc repro` the imported data doesn't get re-hashed. I would expect `dvc exp run` to behave the same.\r\n\r\n### Environment information\r\n\r\n**Output of `dvc doctor`:**\r\n\r\n```console\r\n$ dvc doctor\r\nDVC version: 2.6.3 (pip)\r\n---------------------------------\r\nPlatform: Python 3.9.6 on macOS-10.16-x86_64-i386-64bit\r\nSupports:\r\n gdrive (pydrive2 = 1.9.1),\r\n http (requests = 2.26.0),\r\n https (requests = 2.26.0)\r\nCache types: reflink, hardlink, symlink\r\nCache directory: apfs on /dev/disk1s1s1\r\nCaches: local\r\nRemotes: None\r\nWorkspace directory: apfs on /dev/disk1s1s1\r\nRepo: dvc, git\r\n```\r\n\r\n**Additional Information (if any):**\r\n\r\n```console\r\n$ dvc repro -v\r\n2021-08-25 11:11:55,186 DEBUG: Computed stage: 'cats-dogs.dvc' md5: '5a135b297ee3c96465ce4b320f44fb8b'\r\n'cats-dogs.dvc' didn't change, skipping\r\nStage 'foo' didn't change, skipping\r\nData and pipelines are up to date.\r\n```\r\n\r\n```console\r\n$ dvc exp run -v\r\n2021-08-25 11:12:15,672 DEBUG: Detaching HEAD at 'HEAD' \r\n2021-08-25 11:12:15,690 DEBUG: Stashing workspace\r\n2021-08-25 11:12:15,700 DEBUG: No changes to stash\r\n2021-08-25 11:12:15,749 DEBUG: Creating external repo [email protected]:iterative/dataset-registry.git@ca140591a21c6d75a7057d1e2eb3f51d3115c5f5\r\n2021-08-25 11:12:15,749 DEBUG: erepo: git clone '[email protected]:iterative/dataset-registry.git' to a temporary dir\r\nComputing file/dir hashes (only done once) \r\n. . . \r\n```\r\n\r\n\n", "before_files": [{"content": "from dvc import prompt\n\nfrom . import locked\n\n\ndef _prepare_message(stage, changes):\n changed_deps, changed_outs, changed_stage = changes\n if changed_deps and changed_outs:\n msg = \"dependencies {deps} and outputs {outs} of {stage} changed.\"\n elif changed_deps:\n msg = \"dependencies {deps} of {stage} changed.\"\n elif changed_outs:\n msg = \"outputs {outs} of {stage} changed.\"\n else:\n msg = \"{stage_changed}\"\n\n msg += \" Are you sure you want to commit it?\"\n\n kw = {\n \"stage\": stage,\n \"deps\": changed_deps,\n \"outs\": changed_outs,\n \"stage_changed\": changed_stage,\n }\n return msg.format_map(kw)\n\n\ndef prompt_to_commit(stage, changes, force=False):\n from dvc.stage.exceptions import StageCommitError\n\n if not (force or prompt.confirm(_prepare_message(stage, changes))):\n raise StageCommitError(\n \"unable to commit changed {}. 
Use `-f|--force` to \"\n \"force.\".format(stage)\n )\n\n\n@locked\ndef commit(\n self,\n target,\n with_deps=False,\n recursive=False,\n force=False,\n allow_missing=False,\n data_only=False,\n):\n from dvc.dvcfile import Dvcfile\n\n stages_info = [\n info\n for info in self.stage.collect_granular(\n target, with_deps=with_deps, recursive=recursive\n )\n if not data_only or info.stage.is_data_source\n ]\n for stage_info in stages_info:\n stage = stage_info.stage\n changes = stage.changed_entries()\n if any(changes):\n prompt_to_commit(stage, changes, force=force)\n stage.save(allow_missing=allow_missing)\n stage.commit(\n filter_info=stage_info.filter_info, allow_missing=allow_missing\n )\n\n Dvcfile(self, stage.path).dump(stage, update_pipeline=False)\n return [s.stage for s in stages_info]\n", "path": "dvc/repo/commit.py"}], "after_files": [{"content": "from dvc import prompt\n\nfrom . import locked\n\n\ndef _prepare_message(stage, changes):\n changed_deps, changed_outs, changed_stage = changes\n if changed_deps and changed_outs:\n msg = \"dependencies {deps} and outputs {outs} of {stage} changed.\"\n elif changed_deps:\n msg = \"dependencies {deps} of {stage} changed.\"\n elif changed_outs:\n msg = \"outputs {outs} of {stage} changed.\"\n else:\n msg = \"{stage_changed}\"\n\n msg += \" Are you sure you want to commit it?\"\n\n kw = {\n \"stage\": stage,\n \"deps\": changed_deps,\n \"outs\": changed_outs,\n \"stage_changed\": changed_stage,\n }\n return msg.format_map(kw)\n\n\ndef prompt_to_commit(stage, changes, force=False):\n from dvc.stage.exceptions import StageCommitError\n\n if not (force or prompt.confirm(_prepare_message(stage, changes))):\n raise StageCommitError(\n \"unable to commit changed {}. Use `-f|--force` to \"\n \"force.\".format(stage)\n )\n\n\n@locked\ndef commit(\n self,\n target,\n with_deps=False,\n recursive=False,\n force=False,\n allow_missing=False,\n data_only=False,\n):\n from dvc.dvcfile import Dvcfile\n\n stages_info = [\n info\n for info in self.stage.collect_granular(\n target, with_deps=with_deps, recursive=recursive\n )\n if not data_only or info.stage.is_data_source\n ]\n for stage_info in stages_info:\n stage = stage_info.stage\n if force:\n stage.save(allow_missing=allow_missing)\n else:\n changes = stage.changed_entries()\n if any(changes):\n prompt_to_commit(stage, changes, force=force)\n stage.save(allow_missing=allow_missing)\n stage.commit(\n filter_info=stage_info.filter_info, allow_missing=allow_missing\n )\n\n Dvcfile(self, stage.path).dump(stage, update_pipeline=False)\n return [s.stage for s in stages_info]\n", "path": "dvc/repo/commit.py"}]} | 1,518 | 176 |
gh_patches_debug_31064 | rasdani/github-patches | git_diff | SeldonIO__MLServer-916 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue in aggregating batched requests parameters
When sending batched requests with custom parameters (using the extra fields allowed [here](https://github.com/SeldonIO/MLServer/blob/749cd02bc5f3ee0b915151eedeff50075ac946ad/mlserver/types/dataplane.py#L27)), like the following:
```python
# request 1
payload = types.InferenceRequest(
    inputs=[
        types.RequestInput(
            name="parameters-np",
            shape=[1],
            datatype="BYTES",
            data=[],
            parameters=types.Parameters(**{"custom-param": "value-1"}),
        )
    ]
)
# request 2
payload = types.InferenceRequest(
    inputs=[
        types.RequestInput(
            name="parameters-np",
            shape=[1],
            datatype="BYTES",
            data=[],
            parameters=types.Parameters(**{"custom-param": "value-2"}),
        )
    ]
)
```
the resulting parameters field for the batched inference request will only contain a single `custom-param` entry holding the value from the last request. This is because a [dictionary is used](https://github.com/SeldonIO/MLServer/blob/749cd02bc5f3ee0b915151eedeff50075ac946ad/mlserver/batching/requests.py#L112) to aggregate parameters, so when the same parameter appears with different values within one batch, only one value is kept. E.g. in the case above we will only have `custom-param` equal to `value-2`. A better solution would be to store all the values of a common request parameter in a list, e.g. `{custom-param: ['value-1', 'value-2']}`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/batching/requests.py`
Content:
```
1 from collections import defaultdict, OrderedDict
2 from typing import Dict, List, Optional, Union
3
4 from ..types import (
5 InferenceRequest,
6 InferenceResponse,
7 Parameters,
8 RequestInput,
9 RequestOutput,
10 ResponseOutput,
11 )
12 from .shape import Shape
13
14
15 def _get_data(payload: Union[RequestInput, ResponseOutput]):
16 return getattr(payload.data, "__root__", payload.data)
17
18
19 def _merge_parameters(
20 all_params: dict,
21 parametrised_obj: Union[
22 InferenceRequest, InferenceResponse, RequestInput, RequestOutput
23 ],
24 ) -> dict:
25 if not parametrised_obj.parameters:
26 return all_params
27
28 obj_params = parametrised_obj.parameters.dict()
29 return {**all_params, **obj_params}
30
31
32 def _merge_data(
33 all_data: Union[list, List[str], List[bytes]]
34 ) -> Union[list, str, bytes]:
35 sampled_datum = all_data[0]
36
37 if isinstance(sampled_datum, str):
38 return "".join(all_data) # type: ignore
39
40 if isinstance(sampled_datum, bytes):
41 return b"".join(all_data) # type: ignore
42
43 if isinstance(sampled_datum, list):
44 return sum(all_data, [])
45
46 # TODO: Should we raise an error if we couldn't merge the data?
47 return all_data
48
49
50 class BatchedRequests:
51 def __init__(self, inference_requests: Dict[str, InferenceRequest] = {}):
52 self.inference_requests = inference_requests
53
54 # External IDs represent the incoming prediction IDs that need to match
55 # 1:1 between request and response.
56 # Since we can't ensure the uniqueness (or even presence) of the
57 # external IDs, we'll also maintain our own list of internal IDs.
58 self._ids_mapping: Dict[str, Optional[str]] = OrderedDict()
59
60 # Minibatch here refers to the individual batch size of the input head
61 # of each input request (i.e. the number of datapoints on each input
62 # request)
63 self._minibatch_sizes: Dict[str, int] = OrderedDict()
64
65 self.merged_request = self._merge_requests()
66
67 def _merge_requests(self) -> InferenceRequest:
68 inputs_index: Dict[str, Dict[str, RequestInput]] = defaultdict(OrderedDict)
69 outputs_index: Dict[str, Dict[str, RequestOutput]] = defaultdict(OrderedDict)
70 all_params: dict = {}
71 has_outputs = False # if no outputs are defined, then outputs=None
72
73 for internal_id, inference_request in self.inference_requests.items():
74 self._ids_mapping[internal_id] = inference_request.id
75 all_params = _merge_parameters(all_params, inference_request)
76 for request_input in inference_request.inputs:
77 inputs_index[request_input.name][internal_id] = request_input
78
79 if inference_request.outputs is not None:
80 has_outputs = True
81 for request_output in inference_request.outputs:
82 outputs_index[request_output.name][internal_id] = request_output
83
84 inputs = [
85 self._merge_request_inputs(request_inputs)
86 for request_inputs in inputs_index.values()
87 ]
88
89 outputs = (
90 [
91 self._merge_request_outputs(request_outputs)
92 for request_outputs in outputs_index.values()
93 ]
94 if has_outputs
95 else None
96 )
97
98 # TODO: Should we add a 'fake' request ID?
99 params = Parameters(**all_params) if all_params else None
100 return InferenceRequest(inputs=inputs, outputs=outputs, parameters=params)
101
102 def _merge_request_inputs(
103 self, request_inputs: Dict[str, RequestInput]
104 ) -> RequestInput:
105 # Note that minibatch sizes could be different on each input head,
106 # however, to simplify the implementation, here we assume that it will
107 # be the same across all of them
108 batch_size = 0
109 all_data = []
110 all_params: dict = {}
111 for internal_id, request_input in request_inputs.items():
112 all_params = _merge_parameters(all_params, request_input)
113 all_data.append(_get_data(request_input))
114 minibatch_shape = Shape(request_input.shape)
115 self._minibatch_sizes[internal_id] = minibatch_shape.batch_size
116 batch_size += minibatch_shape.batch_size
117
118 data = _merge_data(all_data)
119 parameters = Parameters(**all_params) if all_params else None
120
121 # TODO: What should we do if list is empty?
122 sampled = next(iter(request_inputs.values()))
123 shape = Shape(sampled.shape)
124 shape.batch_size = batch_size
125
126 return RequestInput(
127 name=sampled.name,
128 datatype=sampled.datatype,
129 shape=shape.to_list(),
130 data=data,
131 parameters=parameters,
132 )
133
134 def _merge_request_outputs(
135 self, request_outputs: Dict[str, RequestOutput]
136 ) -> RequestOutput:
137 all_params: dict = {}
138 for internal_id, request_output in request_outputs.items():
139 all_params = _merge_parameters(all_params, request_output)
140
141 parameters = Parameters(**all_params) if all_params else None
142
143 # TODO: What should we do if list is empty?
144 sampled = next(iter(request_outputs.values()))
145
146 return RequestOutput(name=sampled.name, parameters=parameters)
147
148 def split_response(
149 self, batched_response: InferenceResponse
150 ) -> Dict[str, InferenceResponse]:
151 responses: Dict[str, InferenceResponse] = {}
152
153 for response_output in batched_response.outputs:
154 response_outputs = self._split_response_output(response_output)
155
156 for internal_id, response_output in response_outputs.items():
157 if internal_id not in responses:
158 responses[internal_id] = InferenceResponse(
159 id=self._ids_mapping[internal_id],
160 model_name=batched_response.model_name,
161 model_version=batched_response.model_version,
162 outputs=[],
163 parameters=batched_response.parameters,
164 )
165
166 responses[internal_id].outputs.append(response_output)
167
168 return responses
169
170 def _split_response_output(
171 self, response_output: ResponseOutput
172 ) -> Dict[str, ResponseOutput]:
173
174 all_data = self._split_data(response_output)
175 response_outputs = {}
176 for internal_id, data in all_data.items():
177 shape = Shape(response_output.shape)
178 shape.batch_size = self._minibatch_sizes[internal_id]
179 response_outputs[internal_id] = ResponseOutput(
180 name=response_output.name,
181 shape=shape.to_list(),
182 data=data,
183 datatype=response_output.datatype,
184 parameters=response_output.parameters,
185 )
186
187 return response_outputs
188
189 def _split_data(self, response_output: ResponseOutput) -> Dict[str, ResponseOutput]:
190 merged_shape = Shape(response_output.shape)
191 element_size = merged_shape.elem_size
192 merged_data = _get_data(response_output)
193 idx = 0
194
195 all_data = {}
196 # TODO: Don't rely on array to have been flattened
197 for internal_id, minibatch_size in self._minibatch_sizes.items():
198 data = merged_data[idx : idx + minibatch_size * element_size]
199 idx += minibatch_size * element_size
200 all_data[internal_id] = data
201
202 return all_data
203
```
--- END FILES ---
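To make the failure mode concrete, the sketch below merges parameters as plain dicts while keeping every value of a parameter that repeats across the batch, instead of letting the last request win; batch-level keys such as `content_type` and `headers` stay scalar. It is only an illustration of the idea raised in the issue, not necessarily the exact behaviour the project adopts.
```python
from typing import Any, Dict

# Keys that describe the whole batch and should not be turned into lists.
SCALAR_KEYS = {"content_type", "headers"}


def merge_parameters(merged: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]:
    """Accumulate per-request parameter values instead of overwriting them."""
    result = dict(merged)
    for key, value in new.items():
        if key in SCALAR_KEYS or key not in result:
            result[key] = value
        elif isinstance(result[key], list):
            result[key] = result[key] + [value]
        else:
            result[key] = [result[key], value]
    return result


if __name__ == "__main__":
    batch: Dict[str, Any] = {}
    for params in ({"custom-param": "value-1"}, {"custom-param": "value-2"}):
        batch = merge_parameters(batch, params)
    print(batch)  # {'custom-param': ['value-1', 'value-2']}
```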
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/batching/requests.py b/mlserver/batching/requests.py
--- a/mlserver/batching/requests.py
+++ b/mlserver/batching/requests.py
@@ -29,6 +29,40 @@
return {**all_params, **obj_params}
+def _merge_input_parameters(
+ all_params: dict,
+ parametrised_obj: Union[
+ InferenceRequest, InferenceResponse, RequestInput, RequestOutput
+ ],
+) -> dict:
+ if not parametrised_obj.parameters:
+ return all_params
+ obj_params = parametrised_obj.parameters.dict()
+ if all_params == {}:
+ return obj_params
+ else:
+ common_keys = set(all_params).intersection(set(obj_params)) - {
+ "content_type",
+ "headers",
+ }
+ uncommon_keys = set(all_params).union(set(obj_params)) - common_keys
+ new_all_params = {}
+ for key in common_keys:
+ if type(all_params[key]) == list:
+ new_value = all_params[key] + [obj_params[key]]
+ new_all_params[key] = new_value
+ else:
+ new_value = [all_params[key]]
+ new_value.append(obj_params[key])
+ new_all_params[key] = new_value
+ for key in uncommon_keys:
+ if key in all_params.keys():
+ new_all_params[key] = all_params[key]
+ if key in obj_params.keys():
+ new_all_params[key] = obj_params[key]
+ return new_all_params
+
+
def _merge_data(
all_data: Union[list, List[str], List[bytes]]
) -> Union[list, str, bytes]:
@@ -109,7 +143,7 @@
all_data = []
all_params: dict = {}
for internal_id, request_input in request_inputs.items():
- all_params = _merge_parameters(all_params, request_input)
+ all_params = _merge_input_parameters(all_params, request_input)
all_data.append(_get_data(request_input))
minibatch_shape = Shape(request_input.shape)
self._minibatch_sizes[internal_id] = minibatch_shape.batch_size
| {"golden_diff": "diff --git a/mlserver/batching/requests.py b/mlserver/batching/requests.py\n--- a/mlserver/batching/requests.py\n+++ b/mlserver/batching/requests.py\n@@ -29,6 +29,40 @@\n return {**all_params, **obj_params}\n \n \n+def _merge_input_parameters(\n+ all_params: dict,\n+ parametrised_obj: Union[\n+ InferenceRequest, InferenceResponse, RequestInput, RequestOutput\n+ ],\n+) -> dict:\n+ if not parametrised_obj.parameters:\n+ return all_params\n+ obj_params = parametrised_obj.parameters.dict()\n+ if all_params == {}:\n+ return obj_params\n+ else:\n+ common_keys = set(all_params).intersection(set(obj_params)) - {\n+ \"content_type\",\n+ \"headers\",\n+ }\n+ uncommon_keys = set(all_params).union(set(obj_params)) - common_keys\n+ new_all_params = {}\n+ for key in common_keys:\n+ if type(all_params[key]) == list:\n+ new_value = all_params[key] + [obj_params[key]]\n+ new_all_params[key] = new_value\n+ else:\n+ new_value = [all_params[key]]\n+ new_value.append(obj_params[key])\n+ new_all_params[key] = new_value\n+ for key in uncommon_keys:\n+ if key in all_params.keys():\n+ new_all_params[key] = all_params[key]\n+ if key in obj_params.keys():\n+ new_all_params[key] = obj_params[key]\n+ return new_all_params\n+\n+\n def _merge_data(\n all_data: Union[list, List[str], List[bytes]]\n ) -> Union[list, str, bytes]:\n@@ -109,7 +143,7 @@\n all_data = []\n all_params: dict = {}\n for internal_id, request_input in request_inputs.items():\n- all_params = _merge_parameters(all_params, request_input)\n+ all_params = _merge_input_parameters(all_params, request_input)\n all_data.append(_get_data(request_input))\n minibatch_shape = Shape(request_input.shape)\n self._minibatch_sizes[internal_id] = minibatch_shape.batch_size\n", "issue": "Issue in aggregating batched requests parameters\nWhen sending batched requests with custom parameters (using extra field in [here](https://github.com/SeldonIO/MLServer/blob/749cd02bc5f3ee0b915151eedeff50075ac946ad/mlserver/types/dataplane.py#L27)) like the following:\r\n\r\n```python\r\n# request 1\r\npayload = types.InferenceRequest(\r\n inputs=[\r\n types.RequestInput(\r\n name=\"parameters-np\",\r\n shape=[1],\r\n datatype=\"BYTES\",\r\n data=[],\r\n parameters=types.Parameters(\r\n custom-param='value-1,\r\n )\r\n ]\r\n)\r\n# request 2\r\npayload = types.InferenceRequest(\r\n inputs=[\r\n types.RequestInput(\r\n name=\"parameters-np\",\r\n shape=[1],\r\n datatype=\"BYTES\",\r\n data=[],\r\n parameters=types.Parameters(\r\n custom-param='value-2',\r\n )\r\n ]\r\n)\r\n```\r\nthe results param field for the batched inference request will only contain a single `custom-param` field with the last request value for that, this is because a [dictionary is used](https://github.com/SeldonIO/MLServer/blob/749cd02bc5f3ee0b915151eedeff50075ac946ad/mlserver/batching/requests.py#L112) to aggregate parameters and for the case of having a same parameter with different values in the same batch only one value will be stored for them. E.g. in the case above we will only have `custom-param` equals to `value-2`. A better solution would be able to store all the values of common request parameter separately in a list e.g. 
`{custom-param=['value-1', 'value-2']`\r\n\n", "before_files": [{"content": "from collections import defaultdict, OrderedDict\nfrom typing import Dict, List, Optional, Union\n\nfrom ..types import (\n InferenceRequest,\n InferenceResponse,\n Parameters,\n RequestInput,\n RequestOutput,\n ResponseOutput,\n)\nfrom .shape import Shape\n\n\ndef _get_data(payload: Union[RequestInput, ResponseOutput]):\n return getattr(payload.data, \"__root__\", payload.data)\n\n\ndef _merge_parameters(\n all_params: dict,\n parametrised_obj: Union[\n InferenceRequest, InferenceResponse, RequestInput, RequestOutput\n ],\n) -> dict:\n if not parametrised_obj.parameters:\n return all_params\n\n obj_params = parametrised_obj.parameters.dict()\n return {**all_params, **obj_params}\n\n\ndef _merge_data(\n all_data: Union[list, List[str], List[bytes]]\n) -> Union[list, str, bytes]:\n sampled_datum = all_data[0]\n\n if isinstance(sampled_datum, str):\n return \"\".join(all_data) # type: ignore\n\n if isinstance(sampled_datum, bytes):\n return b\"\".join(all_data) # type: ignore\n\n if isinstance(sampled_datum, list):\n return sum(all_data, [])\n\n # TODO: Should we raise an error if we couldn't merge the data?\n return all_data\n\n\nclass BatchedRequests:\n def __init__(self, inference_requests: Dict[str, InferenceRequest] = {}):\n self.inference_requests = inference_requests\n\n # External IDs represent the incoming prediction IDs that need to match\n # 1:1 between request and response.\n # Since we can't ensure the uniqueness (or even presence) of the\n # external IDs, we'll also maintain our own list of internal IDs.\n self._ids_mapping: Dict[str, Optional[str]] = OrderedDict()\n\n # Minibatch here refers to the individual batch size of the input head\n # of each input request (i.e. 
the number of datapoints on each input\n # request)\n self._minibatch_sizes: Dict[str, int] = OrderedDict()\n\n self.merged_request = self._merge_requests()\n\n def _merge_requests(self) -> InferenceRequest:\n inputs_index: Dict[str, Dict[str, RequestInput]] = defaultdict(OrderedDict)\n outputs_index: Dict[str, Dict[str, RequestOutput]] = defaultdict(OrderedDict)\n all_params: dict = {}\n has_outputs = False # if no outputs are defined, then outputs=None\n\n for internal_id, inference_request in self.inference_requests.items():\n self._ids_mapping[internal_id] = inference_request.id\n all_params = _merge_parameters(all_params, inference_request)\n for request_input in inference_request.inputs:\n inputs_index[request_input.name][internal_id] = request_input\n\n if inference_request.outputs is not None:\n has_outputs = True\n for request_output in inference_request.outputs:\n outputs_index[request_output.name][internal_id] = request_output\n\n inputs = [\n self._merge_request_inputs(request_inputs)\n for request_inputs in inputs_index.values()\n ]\n\n outputs = (\n [\n self._merge_request_outputs(request_outputs)\n for request_outputs in outputs_index.values()\n ]\n if has_outputs\n else None\n )\n\n # TODO: Should we add a 'fake' request ID?\n params = Parameters(**all_params) if all_params else None\n return InferenceRequest(inputs=inputs, outputs=outputs, parameters=params)\n\n def _merge_request_inputs(\n self, request_inputs: Dict[str, RequestInput]\n ) -> RequestInput:\n # Note that minibatch sizes could be different on each input head,\n # however, to simplify the implementation, here we assume that it will\n # be the same across all of them\n batch_size = 0\n all_data = []\n all_params: dict = {}\n for internal_id, request_input in request_inputs.items():\n all_params = _merge_parameters(all_params, request_input)\n all_data.append(_get_data(request_input))\n minibatch_shape = Shape(request_input.shape)\n self._minibatch_sizes[internal_id] = minibatch_shape.batch_size\n batch_size += minibatch_shape.batch_size\n\n data = _merge_data(all_data)\n parameters = Parameters(**all_params) if all_params else None\n\n # TODO: What should we do if list is empty?\n sampled = next(iter(request_inputs.values()))\n shape = Shape(sampled.shape)\n shape.batch_size = batch_size\n\n return RequestInput(\n name=sampled.name,\n datatype=sampled.datatype,\n shape=shape.to_list(),\n data=data,\n parameters=parameters,\n )\n\n def _merge_request_outputs(\n self, request_outputs: Dict[str, RequestOutput]\n ) -> RequestOutput:\n all_params: dict = {}\n for internal_id, request_output in request_outputs.items():\n all_params = _merge_parameters(all_params, request_output)\n\n parameters = Parameters(**all_params) if all_params else None\n\n # TODO: What should we do if list is empty?\n sampled = next(iter(request_outputs.values()))\n\n return RequestOutput(name=sampled.name, parameters=parameters)\n\n def split_response(\n self, batched_response: InferenceResponse\n ) -> Dict[str, InferenceResponse]:\n responses: Dict[str, InferenceResponse] = {}\n\n for response_output in batched_response.outputs:\n response_outputs = self._split_response_output(response_output)\n\n for internal_id, response_output in response_outputs.items():\n if internal_id not in responses:\n responses[internal_id] = InferenceResponse(\n id=self._ids_mapping[internal_id],\n model_name=batched_response.model_name,\n model_version=batched_response.model_version,\n outputs=[],\n parameters=batched_response.parameters,\n )\n\n 
responses[internal_id].outputs.append(response_output)\n\n return responses\n\n def _split_response_output(\n self, response_output: ResponseOutput\n ) -> Dict[str, ResponseOutput]:\n\n all_data = self._split_data(response_output)\n response_outputs = {}\n for internal_id, data in all_data.items():\n shape = Shape(response_output.shape)\n shape.batch_size = self._minibatch_sizes[internal_id]\n response_outputs[internal_id] = ResponseOutput(\n name=response_output.name,\n shape=shape.to_list(),\n data=data,\n datatype=response_output.datatype,\n parameters=response_output.parameters,\n )\n\n return response_outputs\n\n def _split_data(self, response_output: ResponseOutput) -> Dict[str, ResponseOutput]:\n merged_shape = Shape(response_output.shape)\n element_size = merged_shape.elem_size\n merged_data = _get_data(response_output)\n idx = 0\n\n all_data = {}\n # TODO: Don't rely on array to have been flattened\n for internal_id, minibatch_size in self._minibatch_sizes.items():\n data = merged_data[idx : idx + minibatch_size * element_size]\n idx += minibatch_size * element_size\n all_data[internal_id] = data\n\n return all_data\n", "path": "mlserver/batching/requests.py"}], "after_files": [{"content": "from collections import defaultdict, OrderedDict\nfrom typing import Dict, List, Optional, Union\n\nfrom ..types import (\n InferenceRequest,\n InferenceResponse,\n Parameters,\n RequestInput,\n RequestOutput,\n ResponseOutput,\n)\nfrom .shape import Shape\n\n\ndef _get_data(payload: Union[RequestInput, ResponseOutput]):\n return getattr(payload.data, \"__root__\", payload.data)\n\n\ndef _merge_parameters(\n all_params: dict,\n parametrised_obj: Union[\n InferenceRequest, InferenceResponse, RequestInput, RequestOutput\n ],\n) -> dict:\n if not parametrised_obj.parameters:\n return all_params\n\n obj_params = parametrised_obj.parameters.dict()\n return {**all_params, **obj_params}\n\n\ndef _merge_input_parameters(\n all_params: dict,\n parametrised_obj: Union[\n InferenceRequest, InferenceResponse, RequestInput, RequestOutput\n ],\n) -> dict:\n if not parametrised_obj.parameters:\n return all_params\n obj_params = parametrised_obj.parameters.dict()\n if all_params == {}:\n return obj_params\n else:\n common_keys = set(all_params).intersection(set(obj_params)) - {\n \"content_type\",\n \"headers\",\n }\n uncommon_keys = set(all_params).union(set(obj_params)) - common_keys\n new_all_params = {}\n for key in common_keys:\n if type(all_params[key]) == list:\n new_value = all_params[key] + [obj_params[key]]\n new_all_params[key] = new_value\n else:\n new_value = [all_params[key]]\n new_value.append(obj_params[key])\n new_all_params[key] = new_value\n for key in uncommon_keys:\n if key in all_params.keys():\n new_all_params[key] = all_params[key]\n if key in obj_params.keys():\n new_all_params[key] = obj_params[key]\n return new_all_params\n\n\ndef _merge_data(\n all_data: Union[list, List[str], List[bytes]]\n) -> Union[list, str, bytes]:\n sampled_datum = all_data[0]\n\n if isinstance(sampled_datum, str):\n return \"\".join(all_data) # type: ignore\n\n if isinstance(sampled_datum, bytes):\n return b\"\".join(all_data) # type: ignore\n\n if isinstance(sampled_datum, list):\n return sum(all_data, [])\n\n # TODO: Should we raise an error if we couldn't merge the data?\n return all_data\n\n\nclass BatchedRequests:\n def __init__(self, inference_requests: Dict[str, InferenceRequest] = {}):\n self.inference_requests = inference_requests\n\n # External IDs represent the incoming prediction IDs that need to 
match\n # 1:1 between request and response.\n # Since we can't ensure the uniqueness (or even presence) of the\n # external IDs, we'll also maintain our own list of internal IDs.\n self._ids_mapping: Dict[str, Optional[str]] = OrderedDict()\n\n # Minibatch here refers to the individual batch size of the input head\n # of each input request (i.e. the number of datapoints on each input\n # request)\n self._minibatch_sizes: Dict[str, int] = OrderedDict()\n\n self.merged_request = self._merge_requests()\n\n def _merge_requests(self) -> InferenceRequest:\n inputs_index: Dict[str, Dict[str, RequestInput]] = defaultdict(OrderedDict)\n outputs_index: Dict[str, Dict[str, RequestOutput]] = defaultdict(OrderedDict)\n all_params: dict = {}\n has_outputs = False # if no outputs are defined, then outputs=None\n\n for internal_id, inference_request in self.inference_requests.items():\n self._ids_mapping[internal_id] = inference_request.id\n all_params = _merge_parameters(all_params, inference_request)\n for request_input in inference_request.inputs:\n inputs_index[request_input.name][internal_id] = request_input\n\n if inference_request.outputs is not None:\n has_outputs = True\n for request_output in inference_request.outputs:\n outputs_index[request_output.name][internal_id] = request_output\n\n inputs = [\n self._merge_request_inputs(request_inputs)\n for request_inputs in inputs_index.values()\n ]\n\n outputs = (\n [\n self._merge_request_outputs(request_outputs)\n for request_outputs in outputs_index.values()\n ]\n if has_outputs\n else None\n )\n\n # TODO: Should we add a 'fake' request ID?\n params = Parameters(**all_params) if all_params else None\n return InferenceRequest(inputs=inputs, outputs=outputs, parameters=params)\n\n def _merge_request_inputs(\n self, request_inputs: Dict[str, RequestInput]\n ) -> RequestInput:\n # Note that minibatch sizes could be different on each input head,\n # however, to simplify the implementation, here we assume that it will\n # be the same across all of them\n batch_size = 0\n all_data = []\n all_params: dict = {}\n for internal_id, request_input in request_inputs.items():\n all_params = _merge_input_parameters(all_params, request_input)\n all_data.append(_get_data(request_input))\n minibatch_shape = Shape(request_input.shape)\n self._minibatch_sizes[internal_id] = minibatch_shape.batch_size\n batch_size += minibatch_shape.batch_size\n\n data = _merge_data(all_data)\n parameters = Parameters(**all_params) if all_params else None\n\n # TODO: What should we do if list is empty?\n sampled = next(iter(request_inputs.values()))\n shape = Shape(sampled.shape)\n shape.batch_size = batch_size\n\n return RequestInput(\n name=sampled.name,\n datatype=sampled.datatype,\n shape=shape.to_list(),\n data=data,\n parameters=parameters,\n )\n\n def _merge_request_outputs(\n self, request_outputs: Dict[str, RequestOutput]\n ) -> RequestOutput:\n all_params: dict = {}\n for internal_id, request_output in request_outputs.items():\n all_params = _merge_parameters(all_params, request_output)\n\n parameters = Parameters(**all_params) if all_params else None\n\n # TODO: What should we do if list is empty?\n sampled = next(iter(request_outputs.values()))\n\n return RequestOutput(name=sampled.name, parameters=parameters)\n\n def split_response(\n self, batched_response: InferenceResponse\n ) -> Dict[str, InferenceResponse]:\n responses: Dict[str, InferenceResponse] = {}\n\n for response_output in batched_response.outputs:\n response_outputs = 
self._split_response_output(response_output)\n\n for internal_id, response_output in response_outputs.items():\n if internal_id not in responses:\n responses[internal_id] = InferenceResponse(\n id=self._ids_mapping[internal_id],\n model_name=batched_response.model_name,\n model_version=batched_response.model_version,\n outputs=[],\n parameters=batched_response.parameters,\n )\n\n responses[internal_id].outputs.append(response_output)\n\n return responses\n\n def _split_response_output(\n self, response_output: ResponseOutput\n ) -> Dict[str, ResponseOutput]:\n\n all_data = self._split_data(response_output)\n response_outputs = {}\n for internal_id, data in all_data.items():\n shape = Shape(response_output.shape)\n shape.batch_size = self._minibatch_sizes[internal_id]\n response_outputs[internal_id] = ResponseOutput(\n name=response_output.name,\n shape=shape.to_list(),\n data=data,\n datatype=response_output.datatype,\n parameters=response_output.parameters,\n )\n\n return response_outputs\n\n def _split_data(self, response_output: ResponseOutput) -> Dict[str, ResponseOutput]:\n merged_shape = Shape(response_output.shape)\n element_size = merged_shape.elem_size\n merged_data = _get_data(response_output)\n idx = 0\n\n all_data = {}\n # TODO: Don't rely on array to have been flattened\n for internal_id, minibatch_size in self._minibatch_sizes.items():\n data = merged_data[idx : idx + minibatch_size * element_size]\n idx += minibatch_size * element_size\n all_data[internal_id] = data\n\n return all_data\n", "path": "mlserver/batching/requests.py"}]} | 2,700 | 483 |
gh_patches_debug_9015 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When a logged-in source logs in again, our informational message points the wrong way
## Description
In the source interface, when a source who is already logged in logs in again, they see a message saying "You are already logged in. Please verify your codename below as it may differ from the one displayed on the previous page." But the "show codename" tool is now located _above_ the message.
## Steps to Reproduce
(Cribbed from the [1.3.0 test plan](https://github.com/freedomofpress/securedrop/wiki/1.3.0-Test-plan#5075).)
- Open the Source Interface in two different tabs (tab A, tab B) and navigate to `/generate` in both:
- Verify that two different source codenames are displayed, and note them
- Click **Submit Documents** in tab A:
- The `/lookup` page is displayed, and when **Show** is clicked in the codename hint, the tab A codename is displayed
- Click **Submit Documents** in tab B:
- The `/lookup` page is displayed with a blue flash message saying "You are already logged in. Please verify your codename below as it may differ from the one displayed on the previous page."
## Expected Behavior
That we would direct the source to the right place to verify their codename.
## Actual Behavior
We misdirect them, grinning maniacally and chortling to ourselves at our ingenious trickery:

## Comments
We should probably replace "below" with "above" in the message. Or move the codename widget. Something. I don't know, it's hard.
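For illustration, a minimal sketch of the wording change, assuming the message is the one emitted by the `flash()` call in the `create()` view of `securedrop/source_app/main.py` (included in the files below):

```python
# create() view: the codename hint is now rendered above the flash area on /lookup,
# so the notice should point the source upward rather than downward.
flash(gettext("You are already logged in. Please verify your codename above as it " +
              "may differ from the one displayed on the previous page."),
      'notification')
```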
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/main.py`
Content:
```
1 import operator
2 import os
3 import io
4
5 from base64 import urlsafe_b64encode
6 from datetime import datetime
7 from flask import (Blueprint, render_template, flash, redirect, url_for, g,
8 session, current_app, request, Markup, abort)
9 from flask_babel import gettext
10 from sqlalchemy.exc import IntegrityError
11
12 import store
13
14 from db import db
15 from models import Source, Submission, Reply, get_one_or_else
16 from source_app.decorators import login_required
17 from source_app.utils import (logged_in, generate_unique_codename,
18 async_genkey, normalize_timestamps,
19 valid_codename, get_entropy_estimate)
20 from source_app.forms import LoginForm
21
22
23 def make_blueprint(config):
24 view = Blueprint('main', __name__)
25
26 @view.route('/')
27 def index():
28 return render_template('index.html')
29
30 @view.route('/generate', methods=('GET', 'POST'))
31 def generate():
32 if logged_in():
33 flash(gettext(
34 "You were redirected because you are already logged in. "
35 "If you want to create a new account, you should log out "
36 "first."),
37 "notification")
38 return redirect(url_for('.lookup'))
39
40 codename = generate_unique_codename(config)
41
42 # Generate a unique id for each browser tab and associate the codename with this id.
43 # This will allow retrieval of the codename displayed in the tab from which the source has
44 # clicked to proceed to /generate (ref. issue #4458)
45 tab_id = urlsafe_b64encode(os.urandom(64)).decode()
46 codenames = session.get('codenames', {})
47 codenames[tab_id] = codename
48 session['codenames'] = codenames
49
50 session['new_user'] = True
51 return render_template('generate.html', codename=codename, tab_id=tab_id)
52
53 @view.route('/org-logo')
54 def select_logo():
55 if os.path.exists(os.path.join(current_app.static_folder, 'i',
56 'custom_logo.png')):
57 return redirect(url_for('static', filename='i/custom_logo.png'))
58 else:
59 return redirect(url_for('static', filename='i/logo.png'))
60
61 @view.route('/create', methods=['POST'])
62 def create():
63 if session.get('logged_in', False):
64 flash(gettext("You are already logged in. Please verify your codename below as it " +
65 "may differ from the one displayed on the previous page."),
66 'notification')
67 else:
68 tab_id = request.form['tab_id']
69 codename = session['codenames'][tab_id]
70 session['codename'] = codename
71
72 del session['codenames']
73
74 filesystem_id = current_app.crypto_util.hash_codename(codename)
75
76 source = Source(filesystem_id, current_app.crypto_util.display_id())
77 db.session.add(source)
78 try:
79 db.session.commit()
80 except IntegrityError as e:
81 db.session.rollback()
82 current_app.logger.error(
83 "Attempt to create a source with duplicate codename: %s" %
84 (e,))
85
86 # Issue 2386: don't log in on duplicates
87 del session['codename']
88
89 # Issue 4361: Delete 'logged_in' if it's in the session
90 try:
91 del session['logged_in']
92 except KeyError:
93 pass
94
95 abort(500)
96 else:
97 os.mkdir(current_app.storage.path(filesystem_id))
98
99 session['logged_in'] = True
100 return redirect(url_for('.lookup'))
101
102 @view.route('/lookup', methods=('GET',))
103 @login_required
104 def lookup():
105 replies = []
106 source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \
107 .filter(Reply.deleted_by_source == False).all() # noqa
108
109 for reply in source_inbox:
110 reply_path = current_app.storage.path(
111 g.filesystem_id,
112 reply.filename,
113 )
114 try:
115 with io.open(reply_path, "rb") as f:
116 contents = f.read()
117 reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
118 reply.decrypted = reply_obj
119 except UnicodeDecodeError:
120 current_app.logger.error("Could not decode reply %s" %
121 reply.filename)
122 else:
123 reply.date = datetime.utcfromtimestamp(
124 os.stat(reply_path).st_mtime)
125 replies.append(reply)
126
127 # Sort the replies by date
128 replies.sort(key=operator.attrgetter('date'), reverse=True)
129
130 # Generate a keypair to encrypt replies from the journalist
131 # Only do this if the journalist has flagged the source as one
132 # that they would like to reply to. (Issue #140.)
133 if not current_app.crypto_util.get_fingerprint(g.filesystem_id) and \
134 g.source.flagged:
135 db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
136 async_genkey(current_app.crypto_util,
137 db_uri,
138 g.filesystem_id,
139 g.codename)
140
141 return render_template(
142 'lookup.html',
143 allow_document_uploads=current_app.instance_config.allow_document_uploads,
144 codename=g.codename,
145 replies=replies,
146 flagged=g.source.flagged,
147 new_user=session.get('new_user', None),
148 haskey=current_app.crypto_util.get_fingerprint(
149 g.filesystem_id))
150
151 @view.route('/submit', methods=('POST',))
152 @login_required
153 def submit():
154 allow_document_uploads = current_app.instance_config.allow_document_uploads
155 msg = request.form['msg']
156 fh = None
157 if allow_document_uploads and 'fh' in request.files:
158 fh = request.files['fh']
159
160 # Don't submit anything if it was an "empty" submission. #878
161 if not (msg or fh):
162 if allow_document_uploads:
163 flash(gettext(
164 "You must enter a message or choose a file to submit."),
165 "error")
166 else:
167 flash(gettext("You must enter a message."), "error")
168 return redirect(url_for('main.lookup'))
169
170 fnames = []
171 journalist_filename = g.source.journalist_filename
172 first_submission = g.source.interaction_count == 0
173
174 if msg:
175 g.source.interaction_count += 1
176 fnames.append(
177 current_app.storage.save_message_submission(
178 g.filesystem_id,
179 g.source.interaction_count,
180 journalist_filename,
181 msg))
182 if fh:
183 g.source.interaction_count += 1
184 fnames.append(
185 current_app.storage.save_file_submission(
186 g.filesystem_id,
187 g.source.interaction_count,
188 journalist_filename,
189 fh.filename,
190 fh.stream))
191
192 if first_submission:
193 msg = render_template('first_submission_flashed_message.html')
194 flash(Markup(msg), "success")
195
196 else:
197 if msg and not fh:
198 html_contents = gettext('Thanks! We received your message.')
199 elif not msg and fh:
200 html_contents = gettext('Thanks! We received your document.')
201 else:
202 html_contents = gettext('Thanks! We received your message and '
203 'document.')
204
205 msg = render_template('next_submission_flashed_message.html',
206 html_contents=html_contents)
207 flash(Markup(msg), "success")
208
209 new_submissions = []
210 for fname in fnames:
211 submission = Submission(g.source, fname)
212 db.session.add(submission)
213 new_submissions.append(submission)
214
215 if g.source.pending:
216 g.source.pending = False
217
218 # Generate a keypair now, if there's enough entropy (issue #303)
219 # (gpg reads 300 bytes from /dev/random)
220 entropy_avail = get_entropy_estimate()
221 if entropy_avail >= 2400:
222 db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
223
224 async_genkey(current_app.crypto_util,
225 db_uri,
226 g.filesystem_id,
227 g.codename)
228 current_app.logger.info("generating key, entropy: {}".format(
229 entropy_avail))
230 else:
231 current_app.logger.warn(
232 "skipping key generation. entropy: {}".format(
233 entropy_avail))
234
235 g.source.last_updated = datetime.utcnow()
236 db.session.commit()
237
238 for sub in new_submissions:
239 store.async_add_checksum_for_file(sub)
240
241 normalize_timestamps(g.filesystem_id)
242
243 return redirect(url_for('main.lookup'))
244
245 @view.route('/delete', methods=('POST',))
246 @login_required
247 def delete():
248 """This deletes the reply from the source's inbox, but preserves
249 the history for journalists such that they can view conversation
250 history.
251 """
252
253 query = Reply.query.filter_by(
254 filename=request.form['reply_filename'],
255 source_id=g.source.id)
256 reply = get_one_or_else(query, current_app.logger, abort)
257 reply.deleted_by_source = True
258 db.session.add(reply)
259 db.session.commit()
260
261 flash(gettext("Reply deleted"), "notification")
262 return redirect(url_for('.lookup'))
263
264 @view.route('/delete-all', methods=('POST',))
265 @login_required
266 def batch_delete():
267 replies = Reply.query.filter(Reply.source_id == g.source.id) \
268 .filter(Reply.deleted_by_source == False).all() # noqa
269 if len(replies) == 0:
270 current_app.logger.error("Found no replies when at least one was "
271 "expected")
272 return redirect(url_for('.lookup'))
273
274 for reply in replies:
275 reply.deleted_by_source = True
276 db.session.add(reply)
277 db.session.commit()
278
279 flash(gettext("All replies have been deleted"), "notification")
280 return redirect(url_for('.lookup'))
281
282 @view.route('/login', methods=('GET', 'POST'))
283 def login():
284 form = LoginForm()
285 if form.validate_on_submit():
286 codename = request.form['codename'].strip()
287 if valid_codename(codename):
288 session.update(codename=codename, logged_in=True)
289 return redirect(url_for('.lookup', from_login='1'))
290 else:
291 current_app.logger.info(
292 "Login failed for invalid codename")
293 flash(gettext("Sorry, that is not a recognized codename."),
294 "error")
295 return render_template('login.html', form=form)
296
297 @view.route('/logout')
298 def logout():
299 """
300 If a user is logged in, show them a logout page that prompts them to
301 click the New Identity button in Tor Browser to complete their session.
302 Otherwise redirect to the main Source Interface page.
303 """
304 if logged_in():
305
306 # Clear the session after we render the message so it's localized
307 # If a user specified a locale, save it and restore it
308 user_locale = g.locale
309 session.clear()
310 session['locale'] = user_locale
311
312 return render_template('logout.html')
313 else:
314 return redirect(url_for('.index'))
315
316 return view
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py
--- a/securedrop/source_app/main.py
+++ b/securedrop/source_app/main.py
@@ -61,7 +61,7 @@
@view.route('/create', methods=['POST'])
def create():
if session.get('logged_in', False):
- flash(gettext("You are already logged in. Please verify your codename below as it " +
+ flash(gettext("You are already logged in. Please verify your codename above as it " +
"may differ from the one displayed on the previous page."),
'notification')
else:
| {"golden_diff": "diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py\n--- a/securedrop/source_app/main.py\n+++ b/securedrop/source_app/main.py\n@@ -61,7 +61,7 @@\n @view.route('/create', methods=['POST'])\n def create():\n if session.get('logged_in', False):\n- flash(gettext(\"You are already logged in. Please verify your codename below as it \" +\n+ flash(gettext(\"You are already logged in. Please verify your codename above as it \" +\n \"may differ from the one displayed on the previous page.\"),\n 'notification')\n else:\n", "issue": "When a logged-in source logs in again, our informational message points the wrong way\n## Description\r\n\r\nIn the source interface, when a source who is already logged in logs in again, they see a message saying \"You are already logged in. Please verify your codename below as it may differ from the one displayed on the previous page.\" But the \"show codename\" tool is now located _above_ the message.\r\n\r\n## Steps to Reproduce\r\n\r\n(Cribbed from the [1.3.0 test plan](https://github.com/freedomofpress/securedrop/wiki/1.3.0-Test-plan#5075).)\r\n\r\n- Open the Source Interface in two different tabs (tab A, tab B) and navigate to `/generate` in both:\r\n - Verify that two different source codenames are displayed, and note them\r\n- Click **Submit Documents** in tab A:\r\n - The `/lookup` page is displayed, and when **Show** is clicked in the codename hint, the tab A codename is displayed\r\n- Click **Submit Documents** in tab B:\r\n - The `/lookup` page is displayed with a blue flash message saying \"You are already logged in. Please verify your codename below as it may differ from the one displayed on the previous page.\"\r\n\r\n## Expected Behavior\r\n\r\nThat we would direct the source to the right place to verify their codename.\r\n\r\n## Actual Behavior\r\n\r\nWe misdirect them, grinning maniacally and chortling to ourselves at our ingenious trickery:\r\n\r\n\r\n\r\n## Comments\r\n\r\nWe should probably replace \"below\" with \"above\" in the message. Or move the codename widget. Something. I don't know, it's hard.\r\n\n", "before_files": [{"content": "import operator\nimport os\nimport io\n\nfrom base64 import urlsafe_b64encode\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n\n # Generate a unique id for each browser tab and associate the codename with this id.\n # This will allow retrieval of the codename displayed in the tab from which the source has\n # clicked to proceed to /generate (ref. 
issue #4458)\n tab_id = urlsafe_b64encode(os.urandom(64)).decode()\n codenames = session.get('codenames', {})\n codenames[tab_id] = codename\n session['codenames'] = codenames\n\n session['new_user'] = True\n return render_template('generate.html', codename=codename, tab_id=tab_id)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n if session.get('logged_in', False):\n flash(gettext(\"You are already logged in. Please verify your codename below as it \" +\n \"may differ from the one displayed on the previous page.\"),\n 'notification')\n else:\n tab_id = request.form['tab_id']\n codename = session['codenames'][tab_id]\n session['codename'] = codename\n\n del session['codenames']\n\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n\n # Issue 4361: Delete 'logged_in' if it's in the session\n try:\n del session['logged_in']\n except KeyError:\n pass\n\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not current_app.crypto_util.get_fingerprint(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n allow_document_uploads=current_app.instance_config.allow_document_uploads,\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.get_fingerprint(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n allow_document_uploads = current_app.instance_config.allow_document_uploads\n msg = request.form['msg']\n fh = None\n if allow_document_uploads and 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. 
#878\n if not (msg or fh):\n if allow_document_uploads:\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n else:\n flash(gettext(\"You must enter a message.\"), \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. 
entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n \"\"\"\n If a user is logged in, show them a logout page that prompts them to\n click the New Identity button in Tor Browser to complete their session.\n Otherwise redirect to the main Source Interface page.\n \"\"\"\n if logged_in():\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n return render_template('logout.html')\n else:\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}], "after_files": [{"content": "import operator\nimport os\nimport io\n\nfrom base64 import urlsafe_b64encode\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. 
\"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n\n # Generate a unique id for each browser tab and associate the codename with this id.\n # This will allow retrieval of the codename displayed in the tab from which the source has\n # clicked to proceed to /generate (ref. issue #4458)\n tab_id = urlsafe_b64encode(os.urandom(64)).decode()\n codenames = session.get('codenames', {})\n codenames[tab_id] = codename\n session['codenames'] = codenames\n\n session['new_user'] = True\n return render_template('generate.html', codename=codename, tab_id=tab_id)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n if session.get('logged_in', False):\n flash(gettext(\"You are already logged in. Please verify your codename above as it \" +\n \"may differ from the one displayed on the previous page.\"),\n 'notification')\n else:\n tab_id = request.form['tab_id']\n codename = session['codenames'][tab_id]\n session['codename'] = codename\n\n del session['codenames']\n\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n\n # Issue 4361: Delete 'logged_in' if it's in the session\n try:\n del session['logged_in']\n except KeyError:\n pass\n\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. 
(Issue #140.)\n if not current_app.crypto_util.get_fingerprint(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n allow_document_uploads=current_app.instance_config.allow_document_uploads,\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.get_fingerprint(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n allow_document_uploads = current_app.instance_config.allow_document_uploads\n msg = request.form['msg']\n fh = None\n if allow_document_uploads and 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. #878\n if not (msg or fh):\n if allow_document_uploads:\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n else:\n flash(gettext(\"You must enter a message.\"), \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. 
entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n \"\"\"\n If a user is logged in, show them a logout page that prompts them to\n click the New Identity button in Tor Browser to complete their session.\n Otherwise redirect to the main Source Interface page.\n \"\"\"\n if logged_in():\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n return render_template('logout.html')\n else:\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}]} | 3,867 | 141 |
gh_patches_debug_31585 | rasdani/github-patches | git_diff | azavea__raster-vision-1079 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle edge case: reading a window that overflows the RasterSource's extent that has been artificially cropped using extent_crop can cause data leakage
If, for example, there are two `RasterSource`s with cropped extents such that they read from the left and right halves of the same underlying image respectively, there will be instances at the boundary where windows straddle both halves. This is not an issue if the two halves are both in the training split or both in the validation split, but if they are in different splits, there will be data leakage.
**Possible solution**
`RasterSource` should fill the overflowing regions in the window with NODATA values if `extent_crop` is set. This can be a configurable option so that it can be enabled only when the image is part of scenes from different data splits.
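A minimal sketch of such a fill step, assuming `Box`-like extent/window objects with `ymin`/`xmin`/`ymax`/`xmax` attributes and a chip read as a `(height, width, channels)` numpy array; the helper name `fill_overflow` and the `fill_value=0` default are illustrative, not an existing API:

```python
def fill_overflow(extent, window, chip, fill_value=0):
    """Overwrite the parts of `chip` whose pixels fall outside `extent`.

    `extent` is the (possibly cropped) extent of the RasterSource, `window`
    is the window that was read, and `chip` is the (H, W, C) array for it.
    """
    top = max(0, extent.ymin - window.ymin)      # rows above the cropped extent
    bottom = max(0, window.ymax - extent.ymax)   # rows below the cropped extent
    left = max(0, extent.xmin - window.xmin)     # cols left of the cropped extent
    right = max(0, window.xmax - extent.xmax)    # cols right of the cropped extent

    h, w = chip.shape[:2]
    chip[:top] = fill_value
    chip[h - bottom:] = fill_value
    chip[:, :left] = fill_value
    chip[:, w - right:] = fill_value
    return chip
```

`_get_chip` could then run its chip through a helper like this whenever `extent_crop` is set, so windows sampled near the artificial boundary never expose pixels belonging to the other split.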
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision_core/rastervision/core/data/raster_source/rasterio_source.py`
Content:
```
1 import logging
2 import math
3 import os
4 from pyproj import Transformer
5 import subprocess
6 from decimal import Decimal
7 import tempfile
8 from typing import Optional
9
10 import numpy as np
11 import rasterio
12 from rasterio.enums import (ColorInterp, MaskFlags)
13
14 from rastervision.pipeline.file_system import download_if_needed
15 from rastervision.core.box import Box
16 from rastervision.core.data.crs_transformer import RasterioCRSTransformer
17 from rastervision.core.data.raster_source import (RasterSource, CropOffsets)
18 from rastervision.core.data import (ActivateMixin, ActivationError)
19
20 log = logging.getLogger(__name__)
21 wgs84 = 'epsg:4326'
22 meters_per_degree = 111319.5
23
24
25 def build_vrt(vrt_path, image_paths):
26 """Build a VRT for a set of TIFF files."""
27 cmd = ['gdalbuildvrt', vrt_path]
28 cmd.extend(image_paths)
29 subprocess.run(cmd)
30
31
32 def download_and_build_vrt(image_uris, tmp_dir):
33 log.info('Building VRT...')
34 image_paths = [download_if_needed(uri, tmp_dir) for uri in image_uris]
35 image_path = os.path.join(tmp_dir, 'index.vrt')
36 build_vrt(image_path, image_paths)
37 return image_path
38
39
40 def stream_and_build_vrt(images_uris, tmp_dir):
41 log.info('Building VRT...')
42 image_paths = images_uris
43 image_path = os.path.join(tmp_dir, 'index.vrt')
44 build_vrt(image_path, image_paths)
45 return image_path
46
47
48 def load_window(image_dataset, window=None, is_masked=False):
49 """Load a window of an image using Rasterio.
50
51 Args:
52 image_dataset: a Rasterio dataset
53 window: ((row_start, row_stop), (col_start, col_stop)) or
54 ((y_min, y_max), (x_min, x_max))
55 is_masked: If True, read a masked array from rasterio
56
57 Returns:
58 np.ndarray of shape (height, width, channels) where channels is the number of
59 channels in the image_dataset.
60 """
61 if is_masked:
62 im = image_dataset.read(window=window, boundless=True, masked=True)
63 im = np.ma.filled(im, fill_value=0)
64 else:
65 im = image_dataset.read(window=window, boundless=True)
66
67 # Handle non-zero NODATA values by setting the data to 0.
68 for channel, nodata in enumerate(image_dataset.nodatavals):
69 if nodata is not None and nodata != 0:
70 im[channel, im[channel] == nodata] = 0
71
72 im = np.transpose(im, axes=[1, 2, 0])
73 return im
74
75
76 class RasterioSource(ActivateMixin, RasterSource):
77 def __init__(self,
78 uris,
79 raster_transformers,
80 tmp_dir,
81 allow_streaming=False,
82 channel_order=None,
83 x_shift=0.0,
84 y_shift=0.0,
85 extent_crop: Optional[CropOffsets] = None):
86 """Constructor.
87
88 This RasterSource can read any file that can be opened by Rasterio/GDAL
89 including georeferenced formats such as GeoTIFF and non-georeferenced formats
90 such as JPG. See https://www.gdal.org/formats_list.html for more details.
91
92 If channel_order is None, then use non-alpha channels. This also sets any
93 masked or NODATA pixel values to be zeros.
94
95 Args:
96 channel_order: list of indices of channels to extract from raw imagery
97 extent_crop (CropOffsets, optional): Relative
98 offsets (top, left, bottom, right) for cropping the extent.
99 Useful for using splitting a scene into different datasets.
100 Defaults to None i.e. no cropping.
101 """
102 self.uris = uris
103 self.tmp_dir = tmp_dir
104 self.image_tmp_dir = None
105 self.image_dataset = None
106 self.x_shift = x_shift
107 self.y_shift = y_shift
108 self.do_shift = self.x_shift != 0.0 or self.y_shift != 0.0
109 self.allow_streaming = allow_streaming
110 self.extent_crop = extent_crop
111
112 num_channels = None
113
114 # Activate in order to get information out of the raster
115 with self.activate():
116 num_channels = self.image_dataset.count
117 if channel_order is None:
118 colorinterp = self.image_dataset.colorinterp
119 if colorinterp:
120 channel_order = [
121 i for i, color_interp in enumerate(colorinterp)
122 if color_interp != ColorInterp.alpha
123 ]
124 else:
125 channel_order = list(range(0, num_channels))
126 self.validate_channel_order(channel_order, num_channels)
127
128 mask_flags = self.image_dataset.mask_flag_enums
129 self.is_masked = any(
130 [m for m in mask_flags if m != MaskFlags.all_valid])
131
132 self.height = self.image_dataset.height
133 self.width = self.image_dataset.width
134
135 # Get 1x1 chip and apply raster transformers to test dtype.
136 test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))
137 test_chip = test_chip[:, :, channel_order]
138 for transformer in raster_transformers:
139 test_chip = transformer.transform(test_chip, channel_order)
140 self.dtype = test_chip.dtype
141
142 self._set_crs_transformer()
143
144 super().__init__(channel_order, num_channels, raster_transformers)
145
146 def _download_data(self, tmp_dir):
147 """Download any data needed for this Raster Source.
148
149 Return a single local path representing the image or a VRT of the data.
150 """
151 if len(self.uris) == 1:
152 if self.allow_streaming:
153 return self.uris[0]
154 else:
155 return download_if_needed(self.uris[0], tmp_dir)
156 else:
157 if self.allow_streaming:
158 return stream_and_build_vrt(self.uris, tmp_dir)
159 else:
160 return download_and_build_vrt(self.uris, tmp_dir)
161
162 def get_crs_transformer(self):
163 return self.crs_transformer
164
165 def get_extent(self):
166 h, w = self.height, self.width
167 if self.extent_crop is not None:
168 skip_top, skip_left, skip_bottom, skip_right = self.extent_crop
169 ymin, xmin = int(h * skip_top), int(w * skip_left)
170 ymax, xmax = h - int(h * skip_bottom), w - int(w * skip_right)
171 return Box(ymin, xmin, ymax, xmax)
172 return Box(0, 0, h, w)
173
174 def get_dtype(self):
175 """Return the numpy.dtype of this scene"""
176 return self.dtype
177
178 def _get_chip(self, window):
179 if self.image_dataset is None:
180 raise ActivationError('RasterSource must be activated before use')
181 shifted_window = self._get_shifted_window(window)
182 return load_window(
183 self.image_dataset,
184 window=shifted_window.rasterio_format(),
185 is_masked=self.is_masked)
186
187 def _activate(self):
188 # Download images to temporary directory and delete them when done.
189 self.image_tmp_dir = tempfile.TemporaryDirectory(dir=self.tmp_dir)
190 self.imagery_path = self._download_data(self.image_tmp_dir.name)
191 self.image_dataset = rasterio.open(self.imagery_path)
192 self._set_crs_transformer()
193
194 def _set_crs_transformer(self):
195 self.crs_transformer = RasterioCRSTransformer.from_dataset(
196 self.image_dataset)
197 crs = self.image_dataset.crs
198 self.to_wgs84 = None
199 self.from_wgs84 = None
200 if crs and self.do_shift:
201 self.to_wgs84 = Transformer.from_crs(
202 crs.wkt, wgs84, always_xy=True)
203 self.from_wgs84 = Transformer.from_crs(
204 wgs84, crs.wkt, always_xy=True)
205
206 def _deactivate(self):
207 self.image_dataset.close()
208 self.image_dataset = None
209 self.image_tmp_dir.cleanup()
210 self.image_tmp_dir = None
211
212 def _get_shifted_window(self, window):
213 do_shift = self.x_shift != 0.0 or self.y_shift != 0.0
214 if do_shift:
215 ymin, xmin, ymax, xmax = window.tuple_format()
216 width = window.get_width()
217 height = window.get_height()
218
219 # Transform image coordinates into world coordinates
220 transform = self.image_dataset.transform
221 xmin2, ymin2 = transform * (xmin, ymin)
222
223 # Transform from world coordinates to WGS84
224 if self.to_wgs84:
225 lon, lat = self.to_wgs84.transform(xmin2, ymin2)
226 else:
227 lon, lat = xmin2, ymin2
228
229 # Shift. This is performed by computing the shifts in
230 # meters to shifts in degrees. Those shifts are then
231 # applied to the WGS84 coordinate.
232 #
233 # Courtesy of https://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters # noqa
234 lat_radians = math.pi * lat / 180.0
235 dlon = Decimal(self.x_shift) / Decimal(
236 meters_per_degree * math.cos(lat_radians))
237 dlat = Decimal(self.y_shift) / Decimal(meters_per_degree)
238 lon = float(Decimal(lon) + dlon)
239 lat = float(Decimal(lat) + dlat)
240
241 # Transform from WGS84 to world coordinates
242 if self.from_wgs84:
243 xmin3, ymin3 = self.from_wgs84.transform(lon, lat)
244 else:
245 xmin3, ymin3 = lon, lat
246
247 # Trasnform from world coordinates back into image coordinates
248 xmin4, ymin4 = ~transform * (xmin3, ymin3)
249
250 window = Box(ymin4, xmin4, ymin4 + height, xmin4 + width)
251 return window
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py b/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py
--- a/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py
+++ b/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py
@@ -73,6 +73,26 @@
return im
+def fill_overflow(extent: Box,
+ window: Box,
+ arr: np.ndarray,
+ fill_value: int = 0) -> np.ndarray:
+ """Given a window and corresponding array of values, if the window
+ overflows the extent, fill the overflowing regions with fill_value.
+ """
+ top_overflow = max(0, extent.ymin - window.ymin)
+ bottom_overflow = max(0, window.ymax - extent.ymax)
+ left_overflow = max(0, extent.xmin - window.xmin)
+ right_overflow = max(0, window.xmax - extent.xmax)
+
+ h, w = arr.shape[:2]
+ arr[:top_overflow] = fill_value
+ arr[h - bottom_overflow:] = fill_value
+ arr[:, :left_overflow] = fill_value
+ arr[:, w - right_overflow:] = fill_value
+ return arr
+
+
class RasterioSource(ActivateMixin, RasterSource):
def __init__(self,
uris,
@@ -175,14 +195,17 @@
"""Return the numpy.dtype of this scene"""
return self.dtype
- def _get_chip(self, window):
+ def _get_chip(self, window: Box) -> np.ndarray:
if self.image_dataset is None:
raise ActivationError('RasterSource must be activated before use')
shifted_window = self._get_shifted_window(window)
- return load_window(
+ chip = load_window(
self.image_dataset,
window=shifted_window.rasterio_format(),
is_masked=self.is_masked)
+ if self.extent_crop is not None:
+ chip = fill_overflow(self.get_extent(), window, chip)
+ return chip
def _activate(self):
# Download images to temporary directory and delete them when done.
| {"golden_diff": "diff --git a/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py b/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py\n--- a/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py\n+++ b/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py\n@@ -73,6 +73,26 @@\n return im\n \n \n+def fill_overflow(extent: Box,\n+ window: Box,\n+ arr: np.ndarray,\n+ fill_value: int = 0) -> np.ndarray:\n+ \"\"\"Given a window and corresponding array of values, if the window\n+ overflows the extent, fill the overflowing regions with fill_value.\n+ \"\"\"\n+ top_overflow = max(0, extent.ymin - window.ymin)\n+ bottom_overflow = max(0, window.ymax - extent.ymax)\n+ left_overflow = max(0, extent.xmin - window.xmin)\n+ right_overflow = max(0, window.xmax - extent.xmax)\n+\n+ h, w = arr.shape[:2]\n+ arr[:top_overflow] = fill_value\n+ arr[h - bottom_overflow:] = fill_value\n+ arr[:, :left_overflow] = fill_value\n+ arr[:, w - right_overflow:] = fill_value\n+ return arr\n+\n+\n class RasterioSource(ActivateMixin, RasterSource):\n def __init__(self,\n uris,\n@@ -175,14 +195,17 @@\n \"\"\"Return the numpy.dtype of this scene\"\"\"\n return self.dtype\n \n- def _get_chip(self, window):\n+ def _get_chip(self, window: Box) -> np.ndarray:\n if self.image_dataset is None:\n raise ActivationError('RasterSource must be activated before use')\n shifted_window = self._get_shifted_window(window)\n- return load_window(\n+ chip = load_window(\n self.image_dataset,\n window=shifted_window.rasterio_format(),\n is_masked=self.is_masked)\n+ if self.extent_crop is not None:\n+ chip = fill_overflow(self.get_extent(), window, chip)\n+ return chip\n \n def _activate(self):\n # Download images to temporary directory and delete them when done.\n", "issue": "Handle edge case: reading a window that overflows the RasterSource's extent that has been artificially cropped using extent_crop can cause data leakage\nIf, for example, there are two `RasterSource`'s with cropped extents such that they both respectively read from the left and right halves of the same underlying image, there will be instances at the boundary where windows straddle both halves. This is not an issue if the two halves are both in the training split or both in the validation split, but if they are in different splits, there will be data leakage.\r\n\r\n**Possible solution**\r\n`RasterSource` should fill the overflowing regions in the window with NODATA values if `extent_crop` is set. 
This can be a configurable option so that it can be enabled only when the image is part of scenes from different data splits.\n", "before_files": [{"content": "import logging\nimport math\nimport os\nfrom pyproj import Transformer\nimport subprocess\nfrom decimal import Decimal\nimport tempfile\nfrom typing import Optional\n\nimport numpy as np\nimport rasterio\nfrom rasterio.enums import (ColorInterp, MaskFlags)\n\nfrom rastervision.pipeline.file_system import download_if_needed\nfrom rastervision.core.box import Box\nfrom rastervision.core.data.crs_transformer import RasterioCRSTransformer\nfrom rastervision.core.data.raster_source import (RasterSource, CropOffsets)\nfrom rastervision.core.data import (ActivateMixin, ActivationError)\n\nlog = logging.getLogger(__name__)\nwgs84 = 'epsg:4326'\nmeters_per_degree = 111319.5\n\n\ndef build_vrt(vrt_path, image_paths):\n \"\"\"Build a VRT for a set of TIFF files.\"\"\"\n cmd = ['gdalbuildvrt', vrt_path]\n cmd.extend(image_paths)\n subprocess.run(cmd)\n\n\ndef download_and_build_vrt(image_uris, tmp_dir):\n log.info('Building VRT...')\n image_paths = [download_if_needed(uri, tmp_dir) for uri in image_uris]\n image_path = os.path.join(tmp_dir, 'index.vrt')\n build_vrt(image_path, image_paths)\n return image_path\n\n\ndef stream_and_build_vrt(images_uris, tmp_dir):\n log.info('Building VRT...')\n image_paths = images_uris\n image_path = os.path.join(tmp_dir, 'index.vrt')\n build_vrt(image_path, image_paths)\n return image_path\n\n\ndef load_window(image_dataset, window=None, is_masked=False):\n \"\"\"Load a window of an image using Rasterio.\n\n Args:\n image_dataset: a Rasterio dataset\n window: ((row_start, row_stop), (col_start, col_stop)) or\n ((y_min, y_max), (x_min, x_max))\n is_masked: If True, read a masked array from rasterio\n\n Returns:\n np.ndarray of shape (height, width, channels) where channels is the number of\n channels in the image_dataset.\n \"\"\"\n if is_masked:\n im = image_dataset.read(window=window, boundless=True, masked=True)\n im = np.ma.filled(im, fill_value=0)\n else:\n im = image_dataset.read(window=window, boundless=True)\n\n # Handle non-zero NODATA values by setting the data to 0.\n for channel, nodata in enumerate(image_dataset.nodatavals):\n if nodata is not None and nodata != 0:\n im[channel, im[channel] == nodata] = 0\n\n im = np.transpose(im, axes=[1, 2, 0])\n return im\n\n\nclass RasterioSource(ActivateMixin, RasterSource):\n def __init__(self,\n uris,\n raster_transformers,\n tmp_dir,\n allow_streaming=False,\n channel_order=None,\n x_shift=0.0,\n y_shift=0.0,\n extent_crop: Optional[CropOffsets] = None):\n \"\"\"Constructor.\n\n This RasterSource can read any file that can be opened by Rasterio/GDAL\n including georeferenced formats such as GeoTIFF and non-georeferenced formats\n such as JPG. See https://www.gdal.org/formats_list.html for more details.\n\n If channel_order is None, then use non-alpha channels. This also sets any\n masked or NODATA pixel values to be zeros.\n\n Args:\n channel_order: list of indices of channels to extract from raw imagery\n extent_crop (CropOffsets, optional): Relative\n offsets (top, left, bottom, right) for cropping the extent.\n Useful for using splitting a scene into different datasets.\n Defaults to None i.e. 
no cropping.\n \"\"\"\n self.uris = uris\n self.tmp_dir = tmp_dir\n self.image_tmp_dir = None\n self.image_dataset = None\n self.x_shift = x_shift\n self.y_shift = y_shift\n self.do_shift = self.x_shift != 0.0 or self.y_shift != 0.0\n self.allow_streaming = allow_streaming\n self.extent_crop = extent_crop\n\n num_channels = None\n\n # Activate in order to get information out of the raster\n with self.activate():\n num_channels = self.image_dataset.count\n if channel_order is None:\n colorinterp = self.image_dataset.colorinterp\n if colorinterp:\n channel_order = [\n i for i, color_interp in enumerate(colorinterp)\n if color_interp != ColorInterp.alpha\n ]\n else:\n channel_order = list(range(0, num_channels))\n self.validate_channel_order(channel_order, num_channels)\n\n mask_flags = self.image_dataset.mask_flag_enums\n self.is_masked = any(\n [m for m in mask_flags if m != MaskFlags.all_valid])\n\n self.height = self.image_dataset.height\n self.width = self.image_dataset.width\n\n # Get 1x1 chip and apply raster transformers to test dtype.\n test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))\n test_chip = test_chip[:, :, channel_order]\n for transformer in raster_transformers:\n test_chip = transformer.transform(test_chip, channel_order)\n self.dtype = test_chip.dtype\n\n self._set_crs_transformer()\n\n super().__init__(channel_order, num_channels, raster_transformers)\n\n def _download_data(self, tmp_dir):\n \"\"\"Download any data needed for this Raster Source.\n\n Return a single local path representing the image or a VRT of the data.\n \"\"\"\n if len(self.uris) == 1:\n if self.allow_streaming:\n return self.uris[0]\n else:\n return download_if_needed(self.uris[0], tmp_dir)\n else:\n if self.allow_streaming:\n return stream_and_build_vrt(self.uris, tmp_dir)\n else:\n return download_and_build_vrt(self.uris, tmp_dir)\n\n def get_crs_transformer(self):\n return self.crs_transformer\n\n def get_extent(self):\n h, w = self.height, self.width\n if self.extent_crop is not None:\n skip_top, skip_left, skip_bottom, skip_right = self.extent_crop\n ymin, xmin = int(h * skip_top), int(w * skip_left)\n ymax, xmax = h - int(h * skip_bottom), w - int(w * skip_right)\n return Box(ymin, xmin, ymax, xmax)\n return Box(0, 0, h, w)\n\n def get_dtype(self):\n \"\"\"Return the numpy.dtype of this scene\"\"\"\n return self.dtype\n\n def _get_chip(self, window):\n if self.image_dataset is None:\n raise ActivationError('RasterSource must be activated before use')\n shifted_window = self._get_shifted_window(window)\n return load_window(\n self.image_dataset,\n window=shifted_window.rasterio_format(),\n is_masked=self.is_masked)\n\n def _activate(self):\n # Download images to temporary directory and delete them when done.\n self.image_tmp_dir = tempfile.TemporaryDirectory(dir=self.tmp_dir)\n self.imagery_path = self._download_data(self.image_tmp_dir.name)\n self.image_dataset = rasterio.open(self.imagery_path)\n self._set_crs_transformer()\n\n def _set_crs_transformer(self):\n self.crs_transformer = RasterioCRSTransformer.from_dataset(\n self.image_dataset)\n crs = self.image_dataset.crs\n self.to_wgs84 = None\n self.from_wgs84 = None\n if crs and self.do_shift:\n self.to_wgs84 = Transformer.from_crs(\n crs.wkt, wgs84, always_xy=True)\n self.from_wgs84 = Transformer.from_crs(\n wgs84, crs.wkt, always_xy=True)\n\n def _deactivate(self):\n self.image_dataset.close()\n self.image_dataset = None\n self.image_tmp_dir.cleanup()\n self.image_tmp_dir = None\n\n def _get_shifted_window(self, window):\n do_shift = 
self.x_shift != 0.0 or self.y_shift != 0.0\n if do_shift:\n ymin, xmin, ymax, xmax = window.tuple_format()\n width = window.get_width()\n height = window.get_height()\n\n # Transform image coordinates into world coordinates\n transform = self.image_dataset.transform\n xmin2, ymin2 = transform * (xmin, ymin)\n\n # Transform from world coordinates to WGS84\n if self.to_wgs84:\n lon, lat = self.to_wgs84.transform(xmin2, ymin2)\n else:\n lon, lat = xmin2, ymin2\n\n # Shift. This is performed by computing the shifts in\n # meters to shifts in degrees. Those shifts are then\n # applied to the WGS84 coordinate.\n #\n # Courtesy of https://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters # noqa\n lat_radians = math.pi * lat / 180.0\n dlon = Decimal(self.x_shift) / Decimal(\n meters_per_degree * math.cos(lat_radians))\n dlat = Decimal(self.y_shift) / Decimal(meters_per_degree)\n lon = float(Decimal(lon) + dlon)\n lat = float(Decimal(lat) + dlat)\n\n # Transform from WGS84 to world coordinates\n if self.from_wgs84:\n xmin3, ymin3 = self.from_wgs84.transform(lon, lat)\n else:\n xmin3, ymin3 = lon, lat\n\n # Trasnform from world coordinates back into image coordinates\n xmin4, ymin4 = ~transform * (xmin3, ymin3)\n\n window = Box(ymin4, xmin4, ymin4 + height, xmin4 + width)\n return window\n", "path": "rastervision_core/rastervision/core/data/raster_source/rasterio_source.py"}], "after_files": [{"content": "import logging\nimport math\nimport os\nfrom pyproj import Transformer\nimport subprocess\nfrom decimal import Decimal\nimport tempfile\nfrom typing import Optional\n\nimport numpy as np\nimport rasterio\nfrom rasterio.enums import (ColorInterp, MaskFlags)\n\nfrom rastervision.pipeline.file_system import download_if_needed\nfrom rastervision.core.box import Box\nfrom rastervision.core.data.crs_transformer import RasterioCRSTransformer\nfrom rastervision.core.data.raster_source import (RasterSource, CropOffsets)\nfrom rastervision.core.data import (ActivateMixin, ActivationError)\n\nlog = logging.getLogger(__name__)\nwgs84 = 'epsg:4326'\nmeters_per_degree = 111319.5\n\n\ndef build_vrt(vrt_path, image_paths):\n \"\"\"Build a VRT for a set of TIFF files.\"\"\"\n cmd = ['gdalbuildvrt', vrt_path]\n cmd.extend(image_paths)\n subprocess.run(cmd)\n\n\ndef download_and_build_vrt(image_uris, tmp_dir):\n log.info('Building VRT...')\n image_paths = [download_if_needed(uri, tmp_dir) for uri in image_uris]\n image_path = os.path.join(tmp_dir, 'index.vrt')\n build_vrt(image_path, image_paths)\n return image_path\n\n\ndef stream_and_build_vrt(images_uris, tmp_dir):\n log.info('Building VRT...')\n image_paths = images_uris\n image_path = os.path.join(tmp_dir, 'index.vrt')\n build_vrt(image_path, image_paths)\n return image_path\n\n\ndef load_window(image_dataset, window=None, is_masked=False):\n \"\"\"Load a window of an image using Rasterio.\n\n Args:\n image_dataset: a Rasterio dataset\n window: ((row_start, row_stop), (col_start, col_stop)) or\n ((y_min, y_max), (x_min, x_max))\n is_masked: If True, read a masked array from rasterio\n\n Returns:\n np.ndarray of shape (height, width, channels) where channels is the number of\n channels in the image_dataset.\n \"\"\"\n if is_masked:\n im = image_dataset.read(window=window, boundless=True, masked=True)\n im = np.ma.filled(im, fill_value=0)\n else:\n im = image_dataset.read(window=window, boundless=True)\n\n # Handle non-zero NODATA values by setting the data to 0.\n for channel, nodata in 
enumerate(image_dataset.nodatavals):\n if nodata is not None and nodata != 0:\n im[channel, im[channel] == nodata] = 0\n\n im = np.transpose(im, axes=[1, 2, 0])\n return im\n\n\ndef fill_overflow(extent: Box,\n window: Box,\n arr: np.ndarray,\n fill_value: int = 0) -> np.ndarray:\n \"\"\"Given a window and corresponding array of values, if the window\n overflows the extent, fill the overflowing regions with fill_value.\n \"\"\"\n top_overflow = max(0, extent.ymin - window.ymin)\n bottom_overflow = max(0, window.ymax - extent.ymax)\n left_overflow = max(0, extent.xmin - window.xmin)\n right_overflow = max(0, window.xmax - extent.xmax)\n\n h, w = arr.shape[:2]\n arr[:top_overflow] = fill_value\n arr[h - bottom_overflow:] = fill_value\n arr[:, :left_overflow] = fill_value\n arr[:, w - right_overflow:] = fill_value\n return arr\n\n\nclass RasterioSource(ActivateMixin, RasterSource):\n def __init__(self,\n uris,\n raster_transformers,\n tmp_dir,\n allow_streaming=False,\n channel_order=None,\n x_shift=0.0,\n y_shift=0.0,\n extent_crop: Optional[CropOffsets] = None):\n \"\"\"Constructor.\n\n This RasterSource can read any file that can be opened by Rasterio/GDAL\n including georeferenced formats such as GeoTIFF and non-georeferenced formats\n such as JPG. See https://www.gdal.org/formats_list.html for more details.\n\n If channel_order is None, then use non-alpha channels. This also sets any\n masked or NODATA pixel values to be zeros.\n\n Args:\n channel_order: list of indices of channels to extract from raw imagery\n extent_crop (CropOffsets, optional): Relative\n offsets (top, left, bottom, right) for cropping the extent.\n Useful for using splitting a scene into different datasets.\n Defaults to None i.e. no cropping.\n \"\"\"\n self.uris = uris\n self.tmp_dir = tmp_dir\n self.image_tmp_dir = None\n self.image_dataset = None\n self.x_shift = x_shift\n self.y_shift = y_shift\n self.do_shift = self.x_shift != 0.0 or self.y_shift != 0.0\n self.allow_streaming = allow_streaming\n self.extent_crop = extent_crop\n\n num_channels = None\n\n # Activate in order to get information out of the raster\n with self.activate():\n num_channels = self.image_dataset.count\n if channel_order is None:\n colorinterp = self.image_dataset.colorinterp\n if colorinterp:\n channel_order = [\n i for i, color_interp in enumerate(colorinterp)\n if color_interp != ColorInterp.alpha\n ]\n else:\n channel_order = list(range(0, num_channels))\n self.validate_channel_order(channel_order, num_channels)\n\n mask_flags = self.image_dataset.mask_flag_enums\n self.is_masked = any(\n [m for m in mask_flags if m != MaskFlags.all_valid])\n\n self.height = self.image_dataset.height\n self.width = self.image_dataset.width\n\n # Get 1x1 chip and apply raster transformers to test dtype.\n test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))\n test_chip = test_chip[:, :, channel_order]\n for transformer in raster_transformers:\n test_chip = transformer.transform(test_chip, channel_order)\n self.dtype = test_chip.dtype\n\n self._set_crs_transformer()\n\n super().__init__(channel_order, num_channels, raster_transformers)\n\n def _download_data(self, tmp_dir):\n \"\"\"Download any data needed for this Raster Source.\n\n Return a single local path representing the image or a VRT of the data.\n \"\"\"\n if len(self.uris) == 1:\n if self.allow_streaming:\n return self.uris[0]\n else:\n return download_if_needed(self.uris[0], tmp_dir)\n else:\n if self.allow_streaming:\n return stream_and_build_vrt(self.uris, tmp_dir)\n else:\n return 
download_and_build_vrt(self.uris, tmp_dir)\n\n def get_crs_transformer(self):\n return self.crs_transformer\n\n def get_extent(self):\n h, w = self.height, self.width\n if self.extent_crop is not None:\n skip_top, skip_left, skip_bottom, skip_right = self.extent_crop\n ymin, xmin = int(h * skip_top), int(w * skip_left)\n ymax, xmax = h - int(h * skip_bottom), w - int(w * skip_right)\n return Box(ymin, xmin, ymax, xmax)\n return Box(0, 0, h, w)\n\n def get_dtype(self):\n \"\"\"Return the numpy.dtype of this scene\"\"\"\n return self.dtype\n\n def _get_chip(self, window: Box) -> np.ndarray:\n if self.image_dataset is None:\n raise ActivationError('RasterSource must be activated before use')\n shifted_window = self._get_shifted_window(window)\n chip = load_window(\n self.image_dataset,\n window=shifted_window.rasterio_format(),\n is_masked=self.is_masked)\n if self.extent_crop is not None:\n chip = fill_overflow(self.get_extent(), window, chip)\n return chip\n\n def _activate(self):\n # Download images to temporary directory and delete them when done.\n self.image_tmp_dir = tempfile.TemporaryDirectory(dir=self.tmp_dir)\n self.imagery_path = self._download_data(self.image_tmp_dir.name)\n self.image_dataset = rasterio.open(self.imagery_path)\n self._set_crs_transformer()\n\n def _set_crs_transformer(self):\n self.crs_transformer = RasterioCRSTransformer.from_dataset(\n self.image_dataset)\n crs = self.image_dataset.crs\n self.to_wgs84 = None\n self.from_wgs84 = None\n if crs and self.do_shift:\n self.to_wgs84 = Transformer.from_crs(\n crs.wkt, wgs84, always_xy=True)\n self.from_wgs84 = Transformer.from_crs(\n wgs84, crs.wkt, always_xy=True)\n\n def _deactivate(self):\n self.image_dataset.close()\n self.image_dataset = None\n self.image_tmp_dir.cleanup()\n self.image_tmp_dir = None\n\n def _get_shifted_window(self, window):\n do_shift = self.x_shift != 0.0 or self.y_shift != 0.0\n if do_shift:\n ymin, xmin, ymax, xmax = window.tuple_format()\n width = window.get_width()\n height = window.get_height()\n\n # Transform image coordinates into world coordinates\n transform = self.image_dataset.transform\n xmin2, ymin2 = transform * (xmin, ymin)\n\n # Transform from world coordinates to WGS84\n if self.to_wgs84:\n lon, lat = self.to_wgs84.transform(xmin2, ymin2)\n else:\n lon, lat = xmin2, ymin2\n\n # Shift. This is performed by computing the shifts in\n # meters to shifts in degrees. Those shifts are then\n # applied to the WGS84 coordinate.\n #\n # Courtesy of https://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters # noqa\n lat_radians = math.pi * lat / 180.0\n dlon = Decimal(self.x_shift) / Decimal(\n meters_per_degree * math.cos(lat_radians))\n dlat = Decimal(self.y_shift) / Decimal(meters_per_degree)\n lon = float(Decimal(lon) + dlon)\n lat = float(Decimal(lat) + dlat)\n\n # Transform from WGS84 to world coordinates\n if self.from_wgs84:\n xmin3, ymin3 = self.from_wgs84.transform(lon, lat)\n else:\n xmin3, ymin3 = lon, lat\n\n # Trasnform from world coordinates back into image coordinates\n xmin4, ymin4 = ~transform * (xmin3, ymin3)\n\n window = Box(ymin4, xmin4, ymin4 + height, xmin4 + width)\n return window\n", "path": "rastervision_core/rastervision/core/data/raster_source/rasterio_source.py"}]} | 3,312 | 511 |
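For readers skimming the record above: the new `fill_overflow` helper simply zeroes out any rows or columns of a chip that fall outside the (cropped) extent, which is what prevents data leakage across split boundaries. Below is a small self-contained sketch of that behaviour; the coordinates are made up and a namedtuple stands in for rastervision's `Box` class.

```python
# Illustrative sketch of the fill_overflow() helper added in the diff above.
# A namedtuple stands in for rastervision's Box; the coordinates are made up.
from collections import namedtuple

import numpy as np

Box = namedtuple('Box', ['ymin', 'xmin', 'ymax', 'xmax'])


def fill_overflow(extent, window, arr, fill_value=0):
    top_overflow = max(0, extent.ymin - window.ymin)
    bottom_overflow = max(0, window.ymax - extent.ymax)
    left_overflow = max(0, extent.xmin - window.xmin)
    right_overflow = max(0, window.xmax - extent.xmax)
    h, w = arr.shape[:2]
    arr[:top_overflow] = fill_value
    arr[h - bottom_overflow:] = fill_value
    arr[:, :left_overflow] = fill_value
    arr[:, w - right_overflow:] = fill_value
    return arr


extent = Box(0, 0, 100, 100)       # artificially cropped extent
window = Box(-2, 98, 2, 102)       # 4x4 window overflowing top and right
chip = fill_overflow(extent, window, np.ones((4, 4), dtype=np.uint8))
# Only the bottom-left 2x2 block keeps real pixel values; the two top rows
# and the two right-hand columns are set to 0, so no pixels from outside the
# crop boundary leak into this chip.
```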
gh_patches_debug_5228 | rasdani/github-patches | git_diff | pyodide__pyodide-3074 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Export version from pyodide JS module
## 🚀 Feature
In `pyodide.d.ts` I see `declare let version: string;` but it's not exported. It'd be great if it was exported so that I can do this:
```ts
import {version} from "pyodide";
```
### Motivation
I have [some code](https://github.com/alexmojaki/pyodide-worker-runner/blob/e7dd3d0ee1dff457bf9d6104944477840a83e5a7/lib/index.ts#L16) that roughly looks like this:
```ts
import {loadPyodide, PyodideInterface} from "pyodide";
const version = "0.21.1";
const indexURL = `https://cdn.jsdelivr.net/pyodide/v${version}/full/`;
const pyodide: PyodideInterface = await loadPyodide({indexURL});
if (pyodide.version !== version) {
throw new Error(
`loadPyodide loaded version ${pyodide.version} instead of ${version}`,
);
}
```
I'd like to import `version` instead of setting it manually, so that it always automatically matches whatever version of pyodide is installed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/bump_version.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import argparse
4 import difflib
5 import functools
6 import itertools
7 import pathlib
8 import re
9 from ast import Str
10 from collections import namedtuple
11 from collections.abc import Callable
12
13 CORE_VERSION_REGEX = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
14
15 PYTHON_VERSION_REGEX = CORE_VERSION_REGEX + (
16 r"((?P<pre>a|b|rc)(?P<preversion>\d+))?" r"(\.(?P<dev>dev)(?P<devversion>\d+))?"
17 )
18
19 JS_VERSION_REGEX = CORE_VERSION_REGEX + (
20 r"(\-(?P<pre>alpha|beta|rc)\.(?P<preversion>\d+))?"
21 r"(\-(?P<dev>dev)\.(?P<devversion>\d+))?"
22 )
23
24
25 def build_version_pattern(pattern):
26 return re.compile(
27 pattern.format(
28 python_version=f"(?P<version>{PYTHON_VERSION_REGEX})",
29 js_version=f"(?P<version>{JS_VERSION_REGEX})",
30 )
31 )
32
33
34 ROOT = pathlib.Path(__file__).resolve().parent.parent
35 Target = namedtuple("target", ("file", "pattern", "prerelease"))
36 PYTHON_TARGETS = [
37 Target(
38 file=ROOT / "src/py/pyodide/__init__.py",
39 pattern=build_version_pattern('__version__ = "{python_version}"'),
40 prerelease=True,
41 ),
42 Target(
43 file=ROOT / "src/py/setup.cfg",
44 pattern=build_version_pattern("version = {python_version}"),
45 prerelease=True,
46 ),
47 Target(
48 ROOT / "pyodide-build/pyodide_build/__init__.py",
49 pattern=build_version_pattern('__version__ = "{python_version}"'),
50 prerelease=True,
51 ),
52 Target(
53 ROOT / "docs/conf.py",
54 build_version_pattern('pyodide_version = "{python_version}"'),
55 prerelease=True,
56 ),
57 Target(
58 ROOT / "run_docker",
59 build_version_pattern('PYODIDE_PREBUILT_IMAGE_TAG="{python_version}"'),
60 prerelease=False,
61 ),
62 Target(
63 ROOT / "docs/project/about.md",
64 build_version_pattern(r"version\s*=\s*{{{python_version}}}"),
65 prerelease=False,
66 ),
67 ]
68
69 JS_TARGETS = [
70 Target(
71 ROOT / "src/js/package.json",
72 build_version_pattern(r'"pyodide",\s*"version": "{js_version}"'),
73 prerelease=True,
74 ),
75 Target(
76 ROOT / "src/js/package-lock.json",
77 build_version_pattern(r'"pyodide",\s*"version": "{js_version}"'),
78 prerelease=True,
79 ),
80 ]
81
82
83 @functools.lru_cache
84 def python_version_to_js_version(version: str) -> Str:
85 """
86 Convert Python version name to JS version name
87 These two are different in prerelease or dev versions.
88 e.g. 1.2.3a0 <==> 1.2.3-alpha.0
89 4.5.6.dev2 <==> 4.5.6-dev.2
90 """
91 match = re.match(PYTHON_VERSION_REGEX, version)
92 matches = match.groupdict()
93
94 prerelease = matches["pre"] is not None
95 devrelease = matches["dev"] is not None
96
97 if prerelease and devrelease:
98 raise ValueError("Cannot have both prerelease and devrelease")
99 elif prerelease:
100 matches["pre"] = matches["pre"].replace("a", "alpha").replace("b", "beta")
101 return "{major}.{minor}.{patch}-{pre}.{preversion}".format(**matches)
102 elif devrelease:
103 return "{major}.{minor}.{patch}-{dev}.{devversion}".format(**matches)
104 else:
105 return "{major}.{minor}.{patch}".format(**matches)
106
107
108 @functools.lru_cache
109 def is_core_version(version: str) -> bool:
110 match = re.fullmatch(CORE_VERSION_REGEX, version)
111 if match is None:
112 return False
113
114 return True
115
116
117 def parse_current_version(target: Target) -> str:
118 """Parse current version"""
119 content = target.file.read_text()
120 match = target.pattern.search(content)
121
122 if match is None:
123 raise ValueError(f"Unabled to detect version string: {target.file}")
124
125 return match.groupdict()["version"]
126
127
128 def generate_updated_content(
129 target: Target, current_version: str, new_version: str
130 ) -> Callable:
131 file = target.file
132 pattern = target.pattern
133 content = file.read_text()
134
135 if current_version == new_version:
136 return None
137
138 # Some files only required to be bumped on core version release.
139 # For example, we don't deploy prebuilt docker images for dev release.
140 if not target.prerelease:
141 if not is_core_version(new_version):
142 print(f"[*] {file}: Skipped (not targeting a core version)")
143 return None
144
145 new_content = content
146 startpos = 0
147 while match := pattern.search(new_content, pos=startpos):
148 version = match.groupdict()["version"]
149 if version == current_version:
150 start, end = match.span()
151 new_span = new_content[start:end].replace(current_version, new_version)
152 new_content = new_content[:start] + new_span + new_content[end:]
153 startpos = end
154 elif version == new_version:
155 break
156 else:
157 raise ValueError(
158 f"'{file}' contains invalid version: expected '{current_version}' but found '{version}'"
159 )
160
161 show_diff(content, new_content, file)
162
163 return new_content
164
165
166 def show_diff(before: str, after: str, file: pathlib.Path):
167 diffs = list(
168 difflib.unified_diff(
169 before.splitlines(keepends=True), after.splitlines(keepends=True), n=0
170 )
171 )[2:]
172 print(f"[*] Diff of '{file}':\n")
173 print("".join(diffs))
174
175
176 def parse_args():
177 parser = argparse.ArgumentParser("Bump version strings in the Pyodide repository")
178 parser.add_argument("--new-version", help="New version")
179 parser.add_argument(
180 "--dry-run", action="store_true", help="Don't actually write anything"
181 )
182
183 return parser.parse_args()
184
185
186 def main():
187 args = parse_args()
188
189 if args.new_version is None:
190 new_version = input("New version (e.g. 0.22.0, 0.22.0a0, 0.22.0.dev0): ")
191 else:
192 new_version = args.new_version
193
194 if re.fullmatch(PYTHON_VERSION_REGEX, new_version) is None:
195 raise ValueError(f"Invalid new version: {new_version}")
196
197 new_version_py = new_version
198 new_version_js = python_version_to_js_version(new_version)
199
200 # We want to update files in all-or-nothing strategy,
201 # so we keep the queue of update functions
202 update_queue = []
203
204 targets = itertools.chain(
205 zip(PYTHON_TARGETS, [new_version_py] * len(PYTHON_TARGETS)),
206 zip(JS_TARGETS, [new_version_js] * len(JS_TARGETS)),
207 )
208 for target, new_version in targets:
209 current_version = parse_current_version(target)
210 new_content = generate_updated_content(target, current_version, new_version)
211 if new_content is not None:
212 update_queue.append((target, new_content))
213
214 if args.dry_run:
215 return
216
217 for target, content in update_queue:
218 target.file.write_text(content)
219
220
221 if __name__ == "__main__":
222 main()
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/bump_version.py b/tools/bump_version.py
--- a/tools/bump_version.py
+++ b/tools/bump_version.py
@@ -64,6 +64,16 @@
build_version_pattern(r"version\s*=\s*{{{python_version}}}"),
prerelease=False,
),
+ Target(
+ ROOT / "src/js/version.ts",
+ build_version_pattern('version: string = "{python_version}"'),
+ prerelease=True,
+ ),
+ Target(
+ ROOT / "src/core/pre.js",
+ build_version_pattern('API.version = "{python_version}"'),
+ prerelease=True,
+ ),
]
JS_TARGETS = [
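Both new `Target` entries rely on matching lines being present in the JS sources. A quick self-contained check of the added patterns is sketched below; the sample source lines are assumptions for illustration, not the actual contents of `src/js/version.ts` or `src/core/pre.js`.

```python
# Hedged sanity check for the two Target patterns added in the diff above;
# the sample source lines are assumed, not taken from the pyodide repository.
import re

CORE = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
PY_VER = CORE + r"((?P<pre>a|b|rc)(?P<preversion>\d+))?(\.(?P<dev>dev)(?P<devversion>\d+))?"


def build(pattern):
    return re.compile(pattern.format(python_version=f"(?P<version>{PY_VER})"))


samples = {
    'version: string = "{python_version}"': 'export const version: string = "0.21.1";',
    'API.version = "{python_version}"': 'API.version = "0.21.1";',
}
for template, line in samples.items():
    match = build(template).search(line)
    print(template, "->", match.groupdict()["version"])  # both print 0.21.1
```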
| {"golden_diff": "diff --git a/tools/bump_version.py b/tools/bump_version.py\n--- a/tools/bump_version.py\n+++ b/tools/bump_version.py\n@@ -64,6 +64,16 @@\n build_version_pattern(r\"version\\s*=\\s*{{{python_version}}}\"),\n prerelease=False,\n ),\n+ Target(\n+ ROOT / \"src/js/version.ts\",\n+ build_version_pattern('version: string = \"{python_version}\"'),\n+ prerelease=True,\n+ ),\n+ Target(\n+ ROOT / \"src/core/pre.js\",\n+ build_version_pattern('API.version = \"{python_version}\"'),\n+ prerelease=True,\n+ ),\n ]\n \n JS_TARGETS = [\n", "issue": "Export version from pyodide JS module\n## \ud83d\ude80 Feature\r\n\r\nIn `pyodide.d.ts` I see `declare let version: string;` but it's not exported. It'd be great if it was exported so that I can do this:\r\n\r\n```ts\r\nimport {version} from \"pyodide\";\r\n```\r\n\r\n### Motivation\r\n\r\nI have [some code](https://github.com/alexmojaki/pyodide-worker-runner/blob/e7dd3d0ee1dff457bf9d6104944477840a83e5a7/lib/index.ts#L16) that roughly looks like this:\r\n\r\n```ts\r\nimport {loadPyodide, PyodideInterface} from \"pyodide\";\r\n\r\nconst version = \"0.21.1\";\r\nconst indexURL = `https://cdn.jsdelivr.net/pyodide/v${version}/full/`;\r\nconst pyodide: PyodideInterface = await loadPyodide({indexURL});\r\nif (pyodide.version !== version) {\r\n throw new Error(\r\n `loadPyodide loaded version ${pyodide.version} instead of ${version}`,\r\n );\r\n}\r\n```\r\n\r\nI'd like to import `version` instead of setting it manually, so that it always automatically matches whatever version of pyodide is installed.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport difflib\nimport functools\nimport itertools\nimport pathlib\nimport re\nfrom ast import Str\nfrom collections import namedtuple\nfrom collections.abc import Callable\n\nCORE_VERSION_REGEX = r\"(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)\"\n\nPYTHON_VERSION_REGEX = CORE_VERSION_REGEX + (\n r\"((?P<pre>a|b|rc)(?P<preversion>\\d+))?\" r\"(\\.(?P<dev>dev)(?P<devversion>\\d+))?\"\n)\n\nJS_VERSION_REGEX = CORE_VERSION_REGEX + (\n r\"(\\-(?P<pre>alpha|beta|rc)\\.(?P<preversion>\\d+))?\"\n r\"(\\-(?P<dev>dev)\\.(?P<devversion>\\d+))?\"\n)\n\n\ndef build_version_pattern(pattern):\n return re.compile(\n pattern.format(\n python_version=f\"(?P<version>{PYTHON_VERSION_REGEX})\",\n js_version=f\"(?P<version>{JS_VERSION_REGEX})\",\n )\n )\n\n\nROOT = pathlib.Path(__file__).resolve().parent.parent\nTarget = namedtuple(\"target\", (\"file\", \"pattern\", \"prerelease\"))\nPYTHON_TARGETS = [\n Target(\n file=ROOT / \"src/py/pyodide/__init__.py\",\n pattern=build_version_pattern('__version__ = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n file=ROOT / \"src/py/setup.cfg\",\n pattern=build_version_pattern(\"version = {python_version}\"),\n prerelease=True,\n ),\n Target(\n ROOT / \"pyodide-build/pyodide_build/__init__.py\",\n pattern=build_version_pattern('__version__ = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n ROOT / \"docs/conf.py\",\n build_version_pattern('pyodide_version = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n ROOT / \"run_docker\",\n build_version_pattern('PYODIDE_PREBUILT_IMAGE_TAG=\"{python_version}\"'),\n prerelease=False,\n ),\n Target(\n ROOT / \"docs/project/about.md\",\n build_version_pattern(r\"version\\s*=\\s*{{{python_version}}}\"),\n prerelease=False,\n ),\n]\n\nJS_TARGETS = [\n Target(\n ROOT / \"src/js/package.json\",\n build_version_pattern(r'\"pyodide\",\\s*\"version\": \"{js_version}\"'),\n prerelease=True,\n 
),\n Target(\n ROOT / \"src/js/package-lock.json\",\n build_version_pattern(r'\"pyodide\",\\s*\"version\": \"{js_version}\"'),\n prerelease=True,\n ),\n]\n\n\[email protected]_cache\ndef python_version_to_js_version(version: str) -> Str:\n \"\"\"\n Convert Python version name to JS version name\n These two are different in prerelease or dev versions.\n e.g. 1.2.3a0 <==> 1.2.3-alpha.0\n 4.5.6.dev2 <==> 4.5.6-dev.2\n \"\"\"\n match = re.match(PYTHON_VERSION_REGEX, version)\n matches = match.groupdict()\n\n prerelease = matches[\"pre\"] is not None\n devrelease = matches[\"dev\"] is not None\n\n if prerelease and devrelease:\n raise ValueError(\"Cannot have both prerelease and devrelease\")\n elif prerelease:\n matches[\"pre\"] = matches[\"pre\"].replace(\"a\", \"alpha\").replace(\"b\", \"beta\")\n return \"{major}.{minor}.{patch}-{pre}.{preversion}\".format(**matches)\n elif devrelease:\n return \"{major}.{minor}.{patch}-{dev}.{devversion}\".format(**matches)\n else:\n return \"{major}.{minor}.{patch}\".format(**matches)\n\n\[email protected]_cache\ndef is_core_version(version: str) -> bool:\n match = re.fullmatch(CORE_VERSION_REGEX, version)\n if match is None:\n return False\n\n return True\n\n\ndef parse_current_version(target: Target) -> str:\n \"\"\"Parse current version\"\"\"\n content = target.file.read_text()\n match = target.pattern.search(content)\n\n if match is None:\n raise ValueError(f\"Unabled to detect version string: {target.file}\")\n\n return match.groupdict()[\"version\"]\n\n\ndef generate_updated_content(\n target: Target, current_version: str, new_version: str\n) -> Callable:\n file = target.file\n pattern = target.pattern\n content = file.read_text()\n\n if current_version == new_version:\n return None\n\n # Some files only required to be bumped on core version release.\n # For example, we don't deploy prebuilt docker images for dev release.\n if not target.prerelease:\n if not is_core_version(new_version):\n print(f\"[*] {file}: Skipped (not targeting a core version)\")\n return None\n\n new_content = content\n startpos = 0\n while match := pattern.search(new_content, pos=startpos):\n version = match.groupdict()[\"version\"]\n if version == current_version:\n start, end = match.span()\n new_span = new_content[start:end].replace(current_version, new_version)\n new_content = new_content[:start] + new_span + new_content[end:]\n startpos = end\n elif version == new_version:\n break\n else:\n raise ValueError(\n f\"'{file}' contains invalid version: expected '{current_version}' but found '{version}'\"\n )\n\n show_diff(content, new_content, file)\n\n return new_content\n\n\ndef show_diff(before: str, after: str, file: pathlib.Path):\n diffs = list(\n difflib.unified_diff(\n before.splitlines(keepends=True), after.splitlines(keepends=True), n=0\n )\n )[2:]\n print(f\"[*] Diff of '{file}':\\n\")\n print(\"\".join(diffs))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Bump version strings in the Pyodide repository\")\n parser.add_argument(\"--new-version\", help=\"New version\")\n parser.add_argument(\n \"--dry-run\", action=\"store_true\", help=\"Don't actually write anything\"\n )\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n if args.new_version is None:\n new_version = input(\"New version (e.g. 
0.22.0, 0.22.0a0, 0.22.0.dev0): \")\n else:\n new_version = args.new_version\n\n if re.fullmatch(PYTHON_VERSION_REGEX, new_version) is None:\n raise ValueError(f\"Invalid new version: {new_version}\")\n\n new_version_py = new_version\n new_version_js = python_version_to_js_version(new_version)\n\n # We want to update files in all-or-nothing strategy,\n # so we keep the queue of update functions\n update_queue = []\n\n targets = itertools.chain(\n zip(PYTHON_TARGETS, [new_version_py] * len(PYTHON_TARGETS)),\n zip(JS_TARGETS, [new_version_js] * len(JS_TARGETS)),\n )\n for target, new_version in targets:\n current_version = parse_current_version(target)\n new_content = generate_updated_content(target, current_version, new_version)\n if new_content is not None:\n update_queue.append((target, new_content))\n\n if args.dry_run:\n return\n\n for target, content in update_queue:\n target.file.write_text(content)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/bump_version.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport difflib\nimport functools\nimport itertools\nimport pathlib\nimport re\nfrom ast import Str\nfrom collections import namedtuple\nfrom collections.abc import Callable\n\nCORE_VERSION_REGEX = r\"(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)\"\n\nPYTHON_VERSION_REGEX = CORE_VERSION_REGEX + (\n r\"((?P<pre>a|b|rc)(?P<preversion>\\d+))?\" r\"(\\.(?P<dev>dev)(?P<devversion>\\d+))?\"\n)\n\nJS_VERSION_REGEX = CORE_VERSION_REGEX + (\n r\"(\\-(?P<pre>alpha|beta|rc)\\.(?P<preversion>\\d+))?\"\n r\"(\\-(?P<dev>dev)\\.(?P<devversion>\\d+))?\"\n)\n\n\ndef build_version_pattern(pattern):\n return re.compile(\n pattern.format(\n python_version=f\"(?P<version>{PYTHON_VERSION_REGEX})\",\n js_version=f\"(?P<version>{JS_VERSION_REGEX})\",\n )\n )\n\n\nROOT = pathlib.Path(__file__).resolve().parent.parent\nTarget = namedtuple(\"target\", (\"file\", \"pattern\", \"prerelease\"))\nPYTHON_TARGETS = [\n Target(\n file=ROOT / \"src/py/pyodide/__init__.py\",\n pattern=build_version_pattern('__version__ = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n file=ROOT / \"src/py/setup.cfg\",\n pattern=build_version_pattern(\"version = {python_version}\"),\n prerelease=True,\n ),\n Target(\n ROOT / \"pyodide-build/pyodide_build/__init__.py\",\n pattern=build_version_pattern('__version__ = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n ROOT / \"docs/conf.py\",\n build_version_pattern('pyodide_version = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n ROOT / \"run_docker\",\n build_version_pattern('PYODIDE_PREBUILT_IMAGE_TAG=\"{python_version}\"'),\n prerelease=False,\n ),\n Target(\n ROOT / \"docs/project/about.md\",\n build_version_pattern(r\"version\\s*=\\s*{{{python_version}}}\"),\n prerelease=False,\n ),\n Target(\n ROOT / \"src/js/version.ts\",\n build_version_pattern('version: string = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n ROOT / \"src/core/pre.js\",\n build_version_pattern('API.version = \"{python_version}\"'),\n prerelease=True,\n ),\n]\n\nJS_TARGETS = [\n Target(\n ROOT / \"src/js/package.json\",\n build_version_pattern(r'\"pyodide\",\\s*\"version\": \"{js_version}\"'),\n prerelease=True,\n ),\n Target(\n ROOT / \"src/js/package-lock.json\",\n build_version_pattern(r'\"pyodide\",\\s*\"version\": \"{js_version}\"'),\n prerelease=True,\n ),\n]\n\n\[email protected]_cache\ndef python_version_to_js_version(version: str) -> Str:\n \"\"\"\n Convert Python version name to JS version name\n These two 
are different in prerelease or dev versions.\n e.g. 1.2.3a0 <==> 1.2.3-alpha.0\n 4.5.6.dev2 <==> 4.5.6-dev.2\n \"\"\"\n match = re.match(PYTHON_VERSION_REGEX, version)\n matches = match.groupdict()\n\n prerelease = matches[\"pre\"] is not None\n devrelease = matches[\"dev\"] is not None\n\n if prerelease and devrelease:\n raise ValueError(\"Cannot have both prerelease and devrelease\")\n elif prerelease:\n matches[\"pre\"] = matches[\"pre\"].replace(\"a\", \"alpha\").replace(\"b\", \"beta\")\n return \"{major}.{minor}.{patch}-{pre}.{preversion}\".format(**matches)\n elif devrelease:\n return \"{major}.{minor}.{patch}-{dev}.{devversion}\".format(**matches)\n else:\n return \"{major}.{minor}.{patch}\".format(**matches)\n\n\[email protected]_cache\ndef is_core_version(version: str) -> bool:\n match = re.fullmatch(CORE_VERSION_REGEX, version)\n if match is None:\n return False\n\n return True\n\n\ndef parse_current_version(target: Target) -> str:\n \"\"\"Parse current version\"\"\"\n content = target.file.read_text()\n match = target.pattern.search(content)\n\n if match is None:\n raise ValueError(f\"Unabled to detect version string: {target.file}\")\n\n return match.groupdict()[\"version\"]\n\n\ndef generate_updated_content(\n target: Target, current_version: str, new_version: str\n) -> Callable:\n file = target.file\n pattern = target.pattern\n content = file.read_text()\n\n if current_version == new_version:\n return None\n\n # Some files only required to be bumped on core version release.\n # For example, we don't deploy prebuilt docker images for dev release.\n if not target.prerelease:\n if not is_core_version(new_version):\n print(f\"[*] {file}: Skipped (not targeting a core version)\")\n return None\n\n new_content = content\n startpos = 0\n while match := pattern.search(new_content, pos=startpos):\n version = match.groupdict()[\"version\"]\n if version == current_version:\n start, end = match.span()\n new_span = new_content[start:end].replace(current_version, new_version)\n new_content = new_content[:start] + new_span + new_content[end:]\n startpos = end\n elif version == new_version:\n break\n else:\n raise ValueError(\n f\"'{file}' contains invalid version: expected '{current_version}' but found '{version}'\"\n )\n\n show_diff(content, new_content, file)\n\n return new_content\n\n\ndef show_diff(before: str, after: str, file: pathlib.Path):\n diffs = list(\n difflib.unified_diff(\n before.splitlines(keepends=True), after.splitlines(keepends=True), n=0\n )\n )[2:]\n print(f\"[*] Diff of '{file}':\\n\")\n print(\"\".join(diffs))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Bump version strings in the Pyodide repository\")\n parser.add_argument(\"--new-version\", help=\"New version\")\n parser.add_argument(\n \"--dry-run\", action=\"store_true\", help=\"Don't actually write anything\"\n )\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n if args.new_version is None:\n new_version = input(\"New version (e.g. 
0.22.0, 0.22.0a0, 0.22.0.dev0): \")\n else:\n new_version = args.new_version\n\n if re.fullmatch(PYTHON_VERSION_REGEX, new_version) is None:\n raise ValueError(f\"Invalid new version: {new_version}\")\n\n new_version_py = new_version\n new_version_js = python_version_to_js_version(new_version)\n\n # We want to update files in all-or-nothing strategy,\n # so we keep the queue of update functions\n update_queue = []\n\n targets = itertools.chain(\n zip(PYTHON_TARGETS, [new_version_py] * len(PYTHON_TARGETS)),\n zip(JS_TARGETS, [new_version_js] * len(JS_TARGETS)),\n )\n for target, new_version in targets:\n current_version = parse_current_version(target)\n new_content = generate_updated_content(target, current_version, new_version)\n if new_content is not None:\n update_queue.append((target, new_content))\n\n if args.dry_run:\n return\n\n for target, content in update_queue:\n target.file.write_text(content)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/bump_version.py"}]} | 2,774 | 151 |
gh_patches_debug_35163 | rasdani/github-patches | git_diff | StackStorm__st2-4174 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incomplete help for: st2 action-alias match
```
$ st2 action-alias match --help
usage: st2 action-alias match [-t TOKEN] [--api-key API_KEY] [-j] [-y]
[-a ATTR [ATTR ...]] [-w WIDTH [WIDTH ...]]
command
st2 action-alias match: error: too few arguments
```
--- END ISSUE ---
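The "too few arguments" message comes from argparse itself: the `match` subcommand is built with `add_help=False` and registers its own `--help` flag as a plain `store_true` option, so passing `--help` still leaves the required positional `command` unfilled. A minimal, hedged repro of that argparse behaviour (plain argparse, not st2 code) is shown below.

```python
# Minimal argparse repro of the reported behaviour. Assumption: this mirrors
# how the st2 subparser is built (add_help=False plus a manual --help flag).
import argparse

parser = argparse.ArgumentParser(prog="st2 action-alias match", add_help=False)
parser.add_argument("match_text", metavar="command")
parser.add_argument("-h", "--help", action="store_true", dest="help")

parser.parse_args(["--help"])
# argparse prints the usage line and then errors out because the positional
# `command` argument is missing. On Python 2 the wording is
# "error: too few arguments" (as in the issue); on Python 3 it is
# "error: the following arguments are required: command".
```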
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2client/st2client/commands/action_alias.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from __future__ import absolute_import
17
18 from st2client.models import core
19 from st2client.models.action_alias import ActionAlias
20 from st2client.models.action_alias import ActionAliasMatch
21 from st2client.commands import resource
22 from st2client.formatters import table
23
24
25 __all__ = [
26 'ActionAliasBranch',
27 'ActionAliasMatchCommand',
28 'ActionAliasExecuteCommand'
29 ]
30
31
32 class ActionAliasBranch(resource.ResourceBranch):
33 def __init__(self, description, app, subparsers, parent_parser=None):
34 super(ActionAliasBranch, self).__init__(
35 ActionAlias, description, app, subparsers,
36 parent_parser=parent_parser, read_only=False,
37 commands={
38 'list': ActionAliasListCommand,
39 'get': ActionAliasGetCommand
40 })
41
42 self.commands['match'] = ActionAliasMatchCommand(
43 self.resource, self.app, self.subparsers,
44 add_help=False)
45 self.commands['execute'] = ActionAliasExecuteCommand(
46 self.resource, self.app, self.subparsers,
47 add_help=False)
48
49
50 class ActionAliasListCommand(resource.ContentPackResourceListCommand):
51 display_attributes = ['ref', 'pack', 'description', 'enabled']
52
53
54 class ActionAliasGetCommand(resource.ContentPackResourceGetCommand):
55 display_attributes = ['all']
56 attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',
57 'enabled', 'action_ref', 'formats']
58
59
60 class ActionAliasMatchCommand(resource.ResourceCommand):
61 display_attributes = ['name', 'description']
62
63 def __init__(self, resource, *args, **kwargs):
64 super(ActionAliasMatchCommand, self).__init__(
65 resource, 'match',
66 'Get the list of %s that match the command text.' %
67 resource.get_plural_display_name().lower(),
68 *args, **kwargs)
69
70 self.parser.add_argument('match_text',
71 metavar='command',
72 help=help)
73 self.parser.add_argument('-h', '--help',
74 action='store_true', dest='help',
75 help='Print usage for the given action.')
76 self.parser.add_argument('-a', '--attr', nargs='+',
77 default=self.display_attributes,
78 help=('List of attributes to include in the '
79 'output. "all" will return all '
80 'attributes.'))
81 self.parser.add_argument('-w', '--width', nargs='+', type=int,
82 default=None,
83 help=('Set the width of columns in output.'))
84
85 @resource.add_auth_token_to_kwargs_from_cli
86 def run(self, args, **kwargs):
87 alias_match = ActionAliasMatch()
88 alias_match.command = args.match_text
89
90 match, _ = self.manager.match(alias_match, **kwargs)
91 return [match]
92
93 def run_and_print(self, args, **kwargs):
94 instances = self.run(args, **kwargs)
95 self.print_output(instances, table.MultiColumnTable,
96 attributes=args.attr, widths=args.width,
97 json=args.json, yaml=args.yaml)
98
99
100 class ActionAliasExecuteCommand(resource.ResourceCommand):
101 display_attributes = ['name']
102
103 def __init__(self, resource, *args, **kwargs):
104 super(ActionAliasExecuteCommand, self).__init__(
105 resource, 'execute',
106 ('Execute the command text by finding a matching %s.' %
107 resource.get_display_name().lower()), *args, **kwargs)
108
109 self.parser.add_argument('command_text',
110 metavar='command',
111 help=help)
112 self.parser.add_argument('-h', '--help',
113 action='store_true', dest='help',
114 help='Print usage for the given action.')
115 self.parser.add_argument('-u', '--user', type=str, default=None,
116 help='User under which to run the action (admins only).')
117
118 @resource.add_auth_token_to_kwargs_from_cli
119 def run(self, args, **kwargs):
120 payload = core.Resource()
121 payload.command = args.command_text
122 payload.user = args.user
123 payload.source_channel = 'cli'
124
125 alias_execution_mgr = self.app.client.managers['ActionAliasExecution']
126 execution = alias_execution_mgr.match_and_execute(payload)
127 return execution
128
129 def run_and_print(self, args, **kwargs):
130 execution = self.run(args, **kwargs)
131 print("Matching Action-alias: '%s'" % execution.actionalias['ref'])
132 print("To get the results, execute:\n st2 execution get %s" %
133 (execution.execution['id']))
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py
--- a/st2client/st2client/commands/action_alias.py
+++ b/st2client/st2client/commands/action_alias.py
@@ -41,10 +41,10 @@
self.commands['match'] = ActionAliasMatchCommand(
self.resource, self.app, self.subparsers,
- add_help=False)
+ add_help=True)
self.commands['execute'] = ActionAliasExecuteCommand(
self.resource, self.app, self.subparsers,
- add_help=False)
+ add_help=True)
class ActionAliasListCommand(resource.ContentPackResourceListCommand):
@@ -69,10 +69,8 @@
self.parser.add_argument('match_text',
metavar='command',
- help=help)
- self.parser.add_argument('-h', '--help',
- action='store_true', dest='help',
- help='Print usage for the given action.')
+ help=('Get the list of %s that match the command text.' %
+ resource.get_display_name().lower()))
self.parser.add_argument('-a', '--attr', nargs='+',
default=self.display_attributes,
help=('List of attributes to include in the '
@@ -108,10 +106,8 @@
self.parser.add_argument('command_text',
metavar='command',
- help=help)
- self.parser.add_argument('-h', '--help',
- action='store_true', dest='help',
- help='Print usage for the given action.')
+ help=('Execute the command text by finding a matching %s.' %
+ resource.get_display_name().lower()))
self.parser.add_argument('-u', '--user', type=str, default=None,
help='User under which to run the action (admins only).')
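Two things change in the diff above: the subcommands are created with `add_help=True`, so argparse's built-in `-h/--help` prints usage and exits before positional validation, and the positional arguments get real help strings instead of `help=help` (which passed Python's built-in `help` object rather than text). A standalone sketch of the corrected setup, paraphrasing the diff with simplified names:

```python
# Standalone sketch of the fixed behaviour; names are simplified from the diff.
import argparse

parser = argparse.ArgumentParser(prog="st2 action-alias match")  # add_help defaults to True
parser.add_argument(
    "match_text",
    metavar="command",
    help="Get the list of action aliases that match the command text.")

parser.parse_args(["--help"])  # now prints full usage plus argument help, then exits
```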
| {"golden_diff": "diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py\n--- a/st2client/st2client/commands/action_alias.py\n+++ b/st2client/st2client/commands/action_alias.py\n@@ -41,10 +41,10 @@\n \n self.commands['match'] = ActionAliasMatchCommand(\n self.resource, self.app, self.subparsers,\n- add_help=False)\n+ add_help=True)\n self.commands['execute'] = ActionAliasExecuteCommand(\n self.resource, self.app, self.subparsers,\n- add_help=False)\n+ add_help=True)\n \n \n class ActionAliasListCommand(resource.ContentPackResourceListCommand):\n@@ -69,10 +69,8 @@\n \n self.parser.add_argument('match_text',\n metavar='command',\n- help=help)\n- self.parser.add_argument('-h', '--help',\n- action='store_true', dest='help',\n- help='Print usage for the given action.')\n+ help=('Get the list of %s that match the command text.' %\n+ resource.get_display_name().lower()))\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n help=('List of attributes to include in the '\n@@ -108,10 +106,8 @@\n \n self.parser.add_argument('command_text',\n metavar='command',\n- help=help)\n- self.parser.add_argument('-h', '--help',\n- action='store_true', dest='help',\n- help='Print usage for the given action.')\n+ help=('Execute the command text by finding a matching %s.' %\n+ resource.get_display_name().lower()))\n self.parser.add_argument('-u', '--user', type=str, default=None,\n help='User under which to run the action (admins only).')\n", "issue": "Incomplete help for: st2 action-alias match\n```\r\n$ st2 action-alias match --help\r\nusage: st2 action-alias match [-t TOKEN] [--api-key API_KEY] [-j] [-y]\r\n [-a ATTR [ATTR ...]] [-w WIDTH [WIDTH ...]]\r\n command\r\nst2 action-alias match: error: too few arguments\r\n```\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2client.models import core\nfrom st2client.models.action_alias import ActionAlias\nfrom st2client.models.action_alias import ActionAliasMatch\nfrom st2client.commands import resource\nfrom st2client.formatters import table\n\n\n__all__ = [\n 'ActionAliasBranch',\n 'ActionAliasMatchCommand',\n 'ActionAliasExecuteCommand'\n]\n\n\nclass ActionAliasBranch(resource.ResourceBranch):\n def __init__(self, description, app, subparsers, parent_parser=None):\n super(ActionAliasBranch, self).__init__(\n ActionAlias, description, app, subparsers,\n parent_parser=parent_parser, read_only=False,\n commands={\n 'list': ActionAliasListCommand,\n 'get': ActionAliasGetCommand\n })\n\n self.commands['match'] = ActionAliasMatchCommand(\n self.resource, self.app, self.subparsers,\n add_help=False)\n self.commands['execute'] = ActionAliasExecuteCommand(\n self.resource, self.app, self.subparsers,\n add_help=False)\n\n\nclass ActionAliasListCommand(resource.ContentPackResourceListCommand):\n display_attributes = ['ref', 'pack', 'description', 'enabled']\n\n\nclass ActionAliasGetCommand(resource.ContentPackResourceGetCommand):\n display_attributes = ['all']\n attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',\n 'enabled', 'action_ref', 'formats']\n\n\nclass ActionAliasMatchCommand(resource.ResourceCommand):\n display_attributes = ['name', 'description']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasMatchCommand, self).__init__(\n resource, 'match',\n 'Get the list of %s that match the command text.' %\n resource.get_plural_display_name().lower(),\n *args, **kwargs)\n\n self.parser.add_argument('match_text',\n metavar='command',\n help=help)\n self.parser.add_argument('-h', '--help',\n action='store_true', dest='help',\n help='Print usage for the given action.')\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n help=('List of attributes to include in the '\n 'output. \"all\" will return all '\n 'attributes.'))\n self.parser.add_argument('-w', '--width', nargs='+', type=int,\n default=None,\n help=('Set the width of columns in output.'))\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n alias_match = ActionAliasMatch()\n alias_match.command = args.match_text\n\n match, _ = self.manager.match(alias_match, **kwargs)\n return [match]\n\n def run_and_print(self, args, **kwargs):\n instances = self.run(args, **kwargs)\n self.print_output(instances, table.MultiColumnTable,\n attributes=args.attr, widths=args.width,\n json=args.json, yaml=args.yaml)\n\n\nclass ActionAliasExecuteCommand(resource.ResourceCommand):\n display_attributes = ['name']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasExecuteCommand, self).__init__(\n resource, 'execute',\n ('Execute the command text by finding a matching %s.' 
%\n resource.get_display_name().lower()), *args, **kwargs)\n\n self.parser.add_argument('command_text',\n metavar='command',\n help=help)\n self.parser.add_argument('-h', '--help',\n action='store_true', dest='help',\n help='Print usage for the given action.')\n self.parser.add_argument('-u', '--user', type=str, default=None,\n help='User under which to run the action (admins only).')\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n payload = core.Resource()\n payload.command = args.command_text\n payload.user = args.user\n payload.source_channel = 'cli'\n\n alias_execution_mgr = self.app.client.managers['ActionAliasExecution']\n execution = alias_execution_mgr.match_and_execute(payload)\n return execution\n\n def run_and_print(self, args, **kwargs):\n execution = self.run(args, **kwargs)\n print(\"Matching Action-alias: '%s'\" % execution.actionalias['ref'])\n print(\"To get the results, execute:\\n st2 execution get %s\" %\n (execution.execution['id']))\n", "path": "st2client/st2client/commands/action_alias.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2client.models import core\nfrom st2client.models.action_alias import ActionAlias\nfrom st2client.models.action_alias import ActionAliasMatch\nfrom st2client.commands import resource\nfrom st2client.formatters import table\n\n\n__all__ = [\n 'ActionAliasBranch',\n 'ActionAliasMatchCommand',\n 'ActionAliasExecuteCommand'\n]\n\n\nclass ActionAliasBranch(resource.ResourceBranch):\n def __init__(self, description, app, subparsers, parent_parser=None):\n super(ActionAliasBranch, self).__init__(\n ActionAlias, description, app, subparsers,\n parent_parser=parent_parser, read_only=False,\n commands={\n 'list': ActionAliasListCommand,\n 'get': ActionAliasGetCommand\n })\n\n self.commands['match'] = ActionAliasMatchCommand(\n self.resource, self.app, self.subparsers,\n add_help=True)\n self.commands['execute'] = ActionAliasExecuteCommand(\n self.resource, self.app, self.subparsers,\n add_help=True)\n\n\nclass ActionAliasListCommand(resource.ContentPackResourceListCommand):\n display_attributes = ['ref', 'pack', 'description', 'enabled']\n\n\nclass ActionAliasGetCommand(resource.ContentPackResourceGetCommand):\n display_attributes = ['all']\n attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',\n 'enabled', 'action_ref', 'formats']\n\n\nclass ActionAliasMatchCommand(resource.ResourceCommand):\n display_attributes = ['name', 'description']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasMatchCommand, self).__init__(\n resource, 'match',\n 'Get the list of %s that match the command text.' 
%\n resource.get_plural_display_name().lower(),\n *args, **kwargs)\n\n self.parser.add_argument('match_text',\n metavar='command',\n help=('Get the list of %s that match the command text.' %\n resource.get_display_name().lower()))\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n help=('List of attributes to include in the '\n 'output. \"all\" will return all '\n 'attributes.'))\n self.parser.add_argument('-w', '--width', nargs='+', type=int,\n default=None,\n help=('Set the width of columns in output.'))\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n alias_match = ActionAliasMatch()\n alias_match.command = args.match_text\n\n match, _ = self.manager.match(alias_match, **kwargs)\n return [match]\n\n def run_and_print(self, args, **kwargs):\n instances = self.run(args, **kwargs)\n self.print_output(instances, table.MultiColumnTable,\n attributes=args.attr, widths=args.width,\n json=args.json, yaml=args.yaml)\n\n\nclass ActionAliasExecuteCommand(resource.ResourceCommand):\n display_attributes = ['name']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasExecuteCommand, self).__init__(\n resource, 'execute',\n ('Execute the command text by finding a matching %s.' %\n resource.get_display_name().lower()), *args, **kwargs)\n\n self.parser.add_argument('command_text',\n metavar='command',\n help=('Execute the command text by finding a matching %s.' %\n resource.get_display_name().lower()))\n self.parser.add_argument('-u', '--user', type=str, default=None,\n help='User under which to run the action (admins only).')\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n payload = core.Resource()\n payload.command = args.command_text\n payload.user = args.user\n payload.source_channel = 'cli'\n\n alias_execution_mgr = self.app.client.managers['ActionAliasExecution']\n execution = alias_execution_mgr.match_and_execute(payload)\n return execution\n\n def run_and_print(self, args, **kwargs):\n execution = self.run(args, **kwargs)\n print(\"Matching Action-alias: '%s'\" % execution.actionalias['ref'])\n print(\"To get the results, execute:\\n st2 execution get %s\" %\n (execution.execution['id']))\n", "path": "st2client/st2client/commands/action_alias.py"}]} | 1,754 | 406 |
gh_patches_debug_17804 | rasdani/github-patches | git_diff | pypa__pip-6616 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace overly specific function definition with more general alternative
In `outdated.py`, there is a function definition for `was_installed_by_pip` which can be simplified (or gotten rid of), by using `get_installer` defined in another file.
https://github.com/pypa/pip/blob/ba539093754bc96dcdb7f4a48911deffcbcc8725/src/pip/_internal/utils/outdated.py#L79
https://github.com/pypa/pip/blob/ba539093754bc96dcdb7f4a48911deffcbcc8725/src/pip/_internal/utils/packaging.py#L86
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/utils/outdated.py`
Content:
```
1 from __future__ import absolute_import
2
3 import datetime
4 import json
5 import logging
6 import os.path
7 import sys
8
9 from pip._vendor import lockfile, pkg_resources
10 from pip._vendor.packaging import version as packaging_version
11
12 from pip._internal.index import PackageFinder
13 from pip._internal.utils.compat import WINDOWS
14 from pip._internal.utils.filesystem import check_path_owner
15 from pip._internal.utils.misc import ensure_dir, get_installed_version
16 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
17
18 if MYPY_CHECK_RUNNING:
19 import optparse
20 from typing import Any, Dict
21 from pip._internal.download import PipSession
22
23
24 SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
25
26
27 logger = logging.getLogger(__name__)
28
29
30 class SelfCheckState(object):
31 def __init__(self, cache_dir):
32 # type: (str) -> None
33 self.state = {} # type: Dict[str, Any]
34 self.statefile_path = None
35
36 # Try to load the existing state
37 if cache_dir:
38 self.statefile_path = os.path.join(cache_dir, "selfcheck.json")
39 try:
40 with open(self.statefile_path) as statefile:
41 self.state = json.load(statefile)[sys.prefix]
42 except (IOError, ValueError, KeyError):
43 # Explicitly suppressing exceptions, since we don't want to
44 # error out if the cache file is invalid.
45 pass
46
47 def save(self, pypi_version, current_time):
48 # type: (str, datetime.datetime) -> None
49 # If we do not have a path to cache in, don't bother saving.
50 if not self.statefile_path:
51 return
52
53 # Check to make sure that we own the directory
54 if not check_path_owner(os.path.dirname(self.statefile_path)):
55 return
56
57 # Now that we've ensured the directory is owned by this user, we'll go
58 # ahead and make sure that all our directories are created.
59 ensure_dir(os.path.dirname(self.statefile_path))
60
61 # Attempt to write out our version check file
62 with lockfile.LockFile(self.statefile_path):
63 if os.path.exists(self.statefile_path):
64 with open(self.statefile_path) as statefile:
65 state = json.load(statefile)
66 else:
67 state = {}
68
69 state[sys.prefix] = {
70 "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
71 "pypi_version": pypi_version,
72 }
73
74 with open(self.statefile_path, "w") as statefile:
75 json.dump(state, statefile, sort_keys=True,
76 separators=(",", ":"))
77
78
79 def was_installed_by_pip(pkg):
80 # type: (str) -> bool
81 """Checks whether pkg was installed by pip
82
83 This is used not to display the upgrade message when pip is in fact
84 installed by system package manager, such as dnf on Fedora.
85 """
86 try:
87 dist = pkg_resources.get_distribution(pkg)
88 return (dist.has_metadata('INSTALLER') and
89 'pip' in dist.get_metadata_lines('INSTALLER'))
90 except pkg_resources.DistributionNotFound:
91 return False
92
93
94 def pip_version_check(session, options):
95 # type: (PipSession, optparse.Values) -> None
96 """Check for an update for pip.
97
98 Limit the frequency of checks to once per week. State is stored either in
99 the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
100 of the pip script path.
101 """
102 installed_version = get_installed_version("pip")
103 if not installed_version:
104 return
105
106 pip_version = packaging_version.parse(installed_version)
107 pypi_version = None
108
109 try:
110 state = SelfCheckState(cache_dir=options.cache_dir)
111
112 current_time = datetime.datetime.utcnow()
113 # Determine if we need to refresh the state
114 if "last_check" in state.state and "pypi_version" in state.state:
115 last_check = datetime.datetime.strptime(
116 state.state["last_check"],
117 SELFCHECK_DATE_FMT
118 )
119 if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
120 pypi_version = state.state["pypi_version"]
121
122 # Refresh the version if we need to or just see if we need to warn
123 if pypi_version is None:
124 # Lets use PackageFinder to see what the latest pip version is
125 finder = PackageFinder.create(
126 find_links=options.find_links,
127 index_urls=[options.index_url] + options.extra_index_urls,
128 allow_all_prereleases=False, # Explicitly set to False
129 trusted_hosts=options.trusted_hosts,
130 session=session,
131 )
132 candidate = finder.find_candidates("pip").get_best()
133 if candidate is None:
134 return
135 pypi_version = str(candidate.version)
136
137 # save that we've performed a check
138 state.save(pypi_version, current_time)
139
140 remote_version = packaging_version.parse(pypi_version)
141
142 local_version_is_older = (
143 pip_version < remote_version and
144 pip_version.base_version != remote_version.base_version and
145 was_installed_by_pip('pip')
146 )
147
148 # Determine if our pypi_version is older
149 if not local_version_is_older:
150 return
151
152 # Advise "python -m pip" on Windows to avoid issues
153 # with overwriting pip.exe.
154 if WINDOWS:
155 pip_cmd = "python -m pip"
156 else:
157 pip_cmd = "pip"
158 logger.warning(
159 "You are using pip version %s, however version %s is "
160 "available.\nYou should consider upgrading via the "
161 "'%s install --upgrade pip' command.",
162 pip_version, pypi_version, pip_cmd
163 )
164 except Exception:
165 logger.debug(
166 "There was an error checking the latest version of pip",
167 exc_info=True,
168 )
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/utils/outdated.py b/src/pip/_internal/utils/outdated.py
--- a/src/pip/_internal/utils/outdated.py
+++ b/src/pip/_internal/utils/outdated.py
@@ -13,6 +13,7 @@
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.misc import ensure_dir, get_installed_version
+from pip._internal.utils.packaging import get_installer
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
@@ -85,8 +86,7 @@
"""
try:
dist = pkg_resources.get_distribution(pkg)
- return (dist.has_metadata('INSTALLER') and
- 'pip' in dist.get_metadata_lines('INSTALLER'))
+ return "pip" == get_installer(dist)
except pkg_resources.DistributionNotFound:
return False
| {"golden_diff": "diff --git a/src/pip/_internal/utils/outdated.py b/src/pip/_internal/utils/outdated.py\n--- a/src/pip/_internal/utils/outdated.py\n+++ b/src/pip/_internal/utils/outdated.py\n@@ -13,6 +13,7 @@\n from pip._internal.utils.compat import WINDOWS\n from pip._internal.utils.filesystem import check_path_owner\n from pip._internal.utils.misc import ensure_dir, get_installed_version\n+from pip._internal.utils.packaging import get_installer\n from pip._internal.utils.typing import MYPY_CHECK_RUNNING\n \n if MYPY_CHECK_RUNNING:\n@@ -85,8 +86,7 @@\n \"\"\"\n try:\n dist = pkg_resources.get_distribution(pkg)\n- return (dist.has_metadata('INSTALLER') and\n- 'pip' in dist.get_metadata_lines('INSTALLER'))\n+ return \"pip\" == get_installer(dist)\n except pkg_resources.DistributionNotFound:\n return False\n", "issue": "Replace overly specific function definition with more general alternative\nIn `outdated.py`, there is a function definition for `was_installed_by_pip` which can be simplified (or gotten rid of), by using `get_installer` defined in another file.\r\n\r\nhttps://github.com/pypa/pip/blob/ba539093754bc96dcdb7f4a48911deffcbcc8725/src/pip/_internal/utils/outdated.py#L79\r\n\r\nhttps://github.com/pypa/pip/blob/ba539093754bc96dcdb7f4a48911deffcbcc8725/src/pip/_internal/utils/packaging.py#L86\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile, pkg_resources\nfrom pip._vendor.packaging import version as packaging_version\n\nfrom pip._internal.index import PackageFinder\nfrom pip._internal.utils.compat import WINDOWS\nfrom pip._internal.utils.filesystem import check_path_owner\nfrom pip._internal.utils.misc import ensure_dir, get_installed_version\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n import optparse\n from typing import Any, Dict\n from pip._internal.download import PipSession\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelfCheckState(object):\n def __init__(self, cache_dir):\n # type: (str) -> None\n self.state = {} # type: Dict[str, Any]\n self.statefile_path = None\n\n # Try to load the existing state\n if cache_dir:\n self.statefile_path = os.path.join(cache_dir, \"selfcheck.json\")\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n # Explicitly suppressing exceptions, since we don't want to\n # error out if the cache file is invalid.\n pass\n\n def save(self, pypi_version, current_time):\n # type: (str, datetime.datetime) -> None\n # If we do not have a path to cache in, don't bother saving.\n if not self.statefile_path:\n return\n\n # Check to make sure that we own the directory\n if not check_path_owner(os.path.dirname(self.statefile_path)):\n return\n\n # Now that we've ensured the directory is owned by this user, we'll go\n # ahead and make sure that all our directories are created.\n ensure_dir(os.path.dirname(self.statefile_path))\n\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n if os.path.exists(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n else:\n state = {}\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as 
statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef was_installed_by_pip(pkg):\n # type: (str) -> bool\n \"\"\"Checks whether pkg was installed by pip\n\n This is used not to display the upgrade message when pip is in fact\n installed by system package manager, such as dnf on Fedora.\n \"\"\"\n try:\n dist = pkg_resources.get_distribution(pkg)\n return (dist.has_metadata('INSTALLER') and\n 'pip' in dist.get_metadata_lines('INSTALLER'))\n except pkg_resources.DistributionNotFound:\n return False\n\n\ndef pip_version_check(session, options):\n # type: (PipSession, optparse.Values) -> None\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n installed_version = get_installed_version(\"pip\")\n if not installed_version:\n return\n\n pip_version = packaging_version.parse(installed_version)\n pypi_version = None\n\n try:\n state = SelfCheckState(cache_dir=options.cache_dir)\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n # Lets use PackageFinder to see what the latest pip version is\n finder = PackageFinder.create(\n find_links=options.find_links,\n index_urls=[options.index_url] + options.extra_index_urls,\n allow_all_prereleases=False, # Explicitly set to False\n trusted_hosts=options.trusted_hosts,\n session=session,\n )\n candidate = finder.find_candidates(\"pip\").get_best()\n if candidate is None:\n return\n pypi_version = str(candidate.version)\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n remote_version = packaging_version.parse(pypi_version)\n\n local_version_is_older = (\n pip_version < remote_version and\n pip_version.base_version != remote_version.base_version and\n was_installed_by_pip('pip')\n )\n\n # Determine if our pypi_version is older\n if not local_version_is_older:\n return\n\n # Advise \"python -m pip\" on Windows to avoid issues\n # with overwriting pip.exe.\n if WINDOWS:\n pip_cmd = \"python -m pip\"\n else:\n pip_cmd = \"pip\"\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'%s install --upgrade pip' command.\",\n pip_version, pypi_version, pip_cmd\n )\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "src/pip/_internal/utils/outdated.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile, pkg_resources\nfrom pip._vendor.packaging import version as packaging_version\n\nfrom pip._internal.index import PackageFinder\nfrom pip._internal.utils.compat import WINDOWS\nfrom pip._internal.utils.filesystem import check_path_owner\nfrom pip._internal.utils.misc import ensure_dir, get_installed_version\nfrom pip._internal.utils.packaging import get_installer\nfrom pip._internal.utils.typing import 
MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n import optparse\n from typing import Any, Dict\n from pip._internal.download import PipSession\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelfCheckState(object):\n def __init__(self, cache_dir):\n # type: (str) -> None\n self.state = {} # type: Dict[str, Any]\n self.statefile_path = None\n\n # Try to load the existing state\n if cache_dir:\n self.statefile_path = os.path.join(cache_dir, \"selfcheck.json\")\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n # Explicitly suppressing exceptions, since we don't want to\n # error out if the cache file is invalid.\n pass\n\n def save(self, pypi_version, current_time):\n # type: (str, datetime.datetime) -> None\n # If we do not have a path to cache in, don't bother saving.\n if not self.statefile_path:\n return\n\n # Check to make sure that we own the directory\n if not check_path_owner(os.path.dirname(self.statefile_path)):\n return\n\n # Now that we've ensured the directory is owned by this user, we'll go\n # ahead and make sure that all our directories are created.\n ensure_dir(os.path.dirname(self.statefile_path))\n\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n if os.path.exists(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n else:\n state = {}\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef was_installed_by_pip(pkg):\n # type: (str) -> bool\n \"\"\"Checks whether pkg was installed by pip\n\n This is used not to display the upgrade message when pip is in fact\n installed by system package manager, such as dnf on Fedora.\n \"\"\"\n try:\n dist = pkg_resources.get_distribution(pkg)\n return \"pip\" == get_installer(dist)\n except pkg_resources.DistributionNotFound:\n return False\n\n\ndef pip_version_check(session, options):\n # type: (PipSession, optparse.Values) -> None\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. 
State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n installed_version = get_installed_version(\"pip\")\n if not installed_version:\n return\n\n pip_version = packaging_version.parse(installed_version)\n pypi_version = None\n\n try:\n state = SelfCheckState(cache_dir=options.cache_dir)\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n # Lets use PackageFinder to see what the latest pip version is\n finder = PackageFinder.create(\n find_links=options.find_links,\n index_urls=[options.index_url] + options.extra_index_urls,\n allow_all_prereleases=False, # Explicitly set to False\n trusted_hosts=options.trusted_hosts,\n session=session,\n )\n candidate = finder.find_candidates(\"pip\").get_best()\n if candidate is None:\n return\n pypi_version = str(candidate.version)\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n remote_version = packaging_version.parse(pypi_version)\n\n local_version_is_older = (\n pip_version < remote_version and\n pip_version.base_version != remote_version.base_version and\n was_installed_by_pip('pip')\n )\n\n # Determine if our pypi_version is older\n if not local_version_is_older:\n return\n\n # Advise \"python -m pip\" on Windows to avoid issues\n # with overwriting pip.exe.\n if WINDOWS:\n pip_cmd = \"python -m pip\"\n else:\n pip_cmd = \"pip\"\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'%s install --upgrade pip' command.\",\n pip_version, pypi_version, pip_cmd\n )\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "src/pip/_internal/utils/outdated.py"}]} | 2,112 | 206 |
gh_patches_debug_9126 | rasdani/github-patches | git_diff | sunpy__sunpy-5186 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`HMISynopticMap` is missing a docstring
`HMISynopticMap` has no docstring. At minimum, it should at least have something analogous to what `MDISynopticMap` has:
https://github.com/sunpy/sunpy/blob/824f72026403001c65e23f24ced3ff5eb7d13a68/sunpy/map/sources/soho.py#L227-L229
However, it'd be better if it had more meat to it (e.g., explanation and references).
--- END ISSUE ---
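As a rough illustration of the minimum being asked for, a hedged sketch of a possible docstring (only the docstring is shown; `HMIMap` is the existing parent class in the same module, the rest of the class body would stay as it is, and the wording and references are placeholders for the maintainers to settle):

```python
class HMISynopticMap(HMIMap):
    """
    SDO/HMI Synoptic Map.

    Synoptic maps are built from HMI line-of-sight magnetograms collected
    over a full solar rotation.

    See `~sunpy.map.sources.sdo.HMIMap` for information on the HMI
    instrument itself.

    References
    ----------
    * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`__
    """
```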
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/map/sources/sdo.py`
Content:
```
1 """SDO Map subclass definitions"""
2
3 import numpy as np
4
5 import astropy.units as u
6 from astropy.coordinates import CartesianRepresentation, HeliocentricMeanEcliptic
7 from astropy.visualization import AsinhStretch
8 from astropy.visualization.mpl_normalize import ImageNormalize
9
10 from sunpy import log
11 from sunpy.map import GenericMap
12 from sunpy.map.sources.source_type import source_stretch
13
14 __all__ = ['AIAMap', 'HMIMap', 'HMISynopticMap']
15
16
17 class AIAMap(GenericMap):
18 """AIA Image Map.
19
20 The Atmospheric Imaging Assembly is a set of four telescopes that employ
21 normal-incidence, multi-layer coated optics to provide narrow-band imaging
22 of the Sun. It provides high resolution full-disk images of the corona and
23 transition region up to 0.5 solar radii above the solar limb with 1.5
24 arcsecond angular resolution and 12-second temporal resolution. It observes
25 the Sun in the following seven extreme ultraviolet bandpasses: 94 A
26 (Fe XVIII), 131 A (Fe VIII, XXI), 171 A (Fe IX), 193 A (Fe XII, XXIV),
27 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes
28 in the visible 1600 A (C IV) and the nearby continuum (1700 A).
29
30 Notes
31 -----
32 Observer location: The standard AIA FITS header provides the spacecraft location in multiple
33 coordinate systems, including Heliocentric Aries Ecliptic (HAE) and Heliographic Stonyhurst
34 (HGS). SunPy uses the provided HAE coordinates due to accuracy concerns with the provided
35 HGS coordinates, but other software packages may make different choices.
36
37 References
38 ----------
39 * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_
40 * `Instrument Page <https://aia.lmsal.com>`_
41 * `Fits Header keywords <http://jsoc.stanford.edu/doc/keywords/AIA/AIA02840_A_AIA-SDO_FITS_Keyword_Documents.pdf>`_
42 * `Analysis Guide <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/>`_
43 * `Instrument Paper <https://doi.org/10.1007/s11207-011-9776-8>`_
44 * `wavelengths and temperature response reference <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/figures/aia_tel_resp.png>`_
45 """
46
47 def __init__(self, data, header, **kwargs):
48 if 'bunit' not in header and 'pixlunit' in header:
49 # PIXLUNIT is not a FITS standard keyword
50 header['bunit'] = header['pixlunit']
51
52 super().__init__(data, header, **kwargs)
53
54 # Fill in some missing info
55 self.meta['detector'] = self.meta.get('detector', "AIA")
56 self._nickname = self.detector
57 self.plot_settings['cmap'] = self._get_cmap_name()
58 self.plot_settings['norm'] = ImageNormalize(
59 stretch=source_stretch(self.meta, AsinhStretch(0.01)), clip=False)
60
61 @property
62 def _supported_observer_coordinates(self):
63 return [(('haex_obs', 'haey_obs', 'haez_obs'), {'x': self.meta.get('haex_obs'),
64 'y': self.meta.get('haey_obs'),
65 'z': self.meta.get('haez_obs'),
66 'unit': u.m,
67 'representation_type': CartesianRepresentation,
68 'frame': HeliocentricMeanEcliptic})
69 ] + super()._supported_observer_coordinates
70
71 @property
72 def observatory(self):
73 """
74 Returns the observatory.
75 """
76 return self.meta.get('telescop', '').split('/')[0]
77
78 @classmethod
79 def is_datasource_for(cls, data, header, **kwargs):
80 """Determines if header corresponds to an AIA image"""
81 return str(header.get('instrume', '')).startswith('AIA')
82
83
84 class HMIMap(GenericMap):
85 """HMI Image Map.
86
87 HMI consists of a refracting telescope, a polarization selector,
88 an image stabilization system, a narrow band tunable filter
89 and two 4096 pixel CCD cameras. It observes the full solar disk in the Fe I
90 absorption line at 6173 Angstrom with a resolution of 1 arc-second.
91 HMI takes images in a sequence of tuning and polarizations at a 4-second
92 cadence for each camera. One camera is dedicated to a 45 s Doppler and
93 line-of-sight field sequence while the other to a 90 s vector field
94 sequence.
95
96 References
97 ----------
98 * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_
99 * `Instrument Page <http://hmi.stanford.edu>`_
100 * `Analysis Guide <http://hmi.stanford.edu/doc/magnetic/guide.pdf>`_
101 """
102
103 def __init__(self, data, header, **kwargs):
104
105 super().__init__(data, header, **kwargs)
106
107 self.meta['detector'] = self.meta.get('detector', "HMI")
108 self._nickname = self.detector
109
110 @property
111 def measurement(self):
112 """
113 Returns the measurement type.
114 """
115 return self.meta.get('content', '').split(" ")[0].lower()
116
117 @property
118 def observatory(self):
119 """
120 Returns the observatory.
121 """
122 return self.meta.get('telescop', '').split('/')[0]
123
124 @classmethod
125 def is_datasource_for(cls, data, header, **kwargs):
126 """Determines if header corresponds to an HMI image"""
127 return (str(header.get('TELESCOP', '')).endswith('HMI') and
128 not HMISynopticMap.is_datasource_for(data, header))
129
130
131 class HMISynopticMap(HMIMap):
132
133 def __init__(self, data, header, **kwargs):
134 super().__init__(data, header, **kwargs)
135
136 if self.meta['cunit1'] == 'Degree':
137 self.meta['cunit1'] = 'deg'
138
139 if self.meta['cunit2'] == 'Sine Latitude':
140 log.debug("Editing CUNIT2, CDELT1, CDLET2 keywords to the correct "
141 "values for a CEA projection.")
142 self.meta['cunit2'] = 'deg'
143
144 # Since, this map uses the cylindrical equal-area (CEA) projection,
145 # the spacing should be modified to 180/pi times the original value
146 # Reference: Section 5.5, Thompson 2006
147 self.meta['cdelt2'] = 180 / np.pi * self.meta['cdelt2']
148 self.meta['cdelt1'] = np.abs(self.meta['cdelt1'])
149
150 if 'date-obs' not in self.meta and 't_obs' in self.meta:
151 log.debug('Setting "DATE-OBS" keyword from "T_OBS"')
152 self.meta['date-obs'] = self.meta['t_obs']
153
154 @classmethod
155 def is_datasource_for(cls, data, header, **kwargs):
156 """
157 Determines if header corresponds to an HMI synoptic map.
158 """
159 return (str(header.get('TELESCOP', '')).endswith('HMI') and
160 str(header.get('CONTENT', '')) ==
161 'Carrington Synoptic Chart Of Br Field')
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/map/sources/sdo.py b/sunpy/map/sources/sdo.py
--- a/sunpy/map/sources/sdo.py
+++ b/sunpy/map/sources/sdo.py
@@ -129,7 +129,19 @@
class HMISynopticMap(HMIMap):
+ """
+ SDO/HMI Synoptic Map.
+
+ Synoptic maps are constructed from HMI 720s line-of-sight magnetograms
+ collected over a 27-day solar rotation.
+
+ See `~sunpy.map.sources.sdo.HMIMap` for information on the HMI instrument.
+ References
+ ----------
+ * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`__
+ * `JSOC's HMI Synoptic Charts <http://jsoc.stanford.edu/HMI/LOS_Synoptic_charts.html>`__
+ """
def __init__(self, data, header, **kwargs):
super().__init__(data, header, **kwargs)
| {"golden_diff": "diff --git a/sunpy/map/sources/sdo.py b/sunpy/map/sources/sdo.py\n--- a/sunpy/map/sources/sdo.py\n+++ b/sunpy/map/sources/sdo.py\n@@ -129,7 +129,19 @@\n \n \n class HMISynopticMap(HMIMap):\n+ \"\"\"\n+ SDO/HMI Synoptic Map.\n+\n+ Synoptic maps are constructed from HMI 720s line-of-sight magnetograms\n+ collected over a 27-day solar rotation.\n+\n+ See `~sunpy.map.sources.sdo.HMIMap` for information on the HMI instrument.\n \n+ References\n+ ----------\n+ * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`__\n+ * `JSOC's HMI Synoptic Charts <http://jsoc.stanford.edu/HMI/LOS_Synoptic_charts.html>`__\n+ \"\"\"\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n", "issue": "`HMISynopticMap` is missing a docstring\n`HMISynopticMap` has no docstring. At minimum, it should at least have something analogous to what `MDISynopticMap` has:\r\nhttps://github.com/sunpy/sunpy/blob/824f72026403001c65e23f24ced3ff5eb7d13a68/sunpy/map/sources/soho.py#L227-L229\r\n\r\nHowever, it'd be better if it had more meat to it (e.g., explanation and references).\n", "before_files": [{"content": "\"\"\"SDO Map subclass definitions\"\"\"\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.coordinates import CartesianRepresentation, HeliocentricMeanEcliptic\nfrom astropy.visualization import AsinhStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\n\nfrom sunpy import log\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\n\n__all__ = ['AIAMap', 'HMIMap', 'HMISynopticMap']\n\n\nclass AIAMap(GenericMap):\n \"\"\"AIA Image Map.\n\n The Atmospheric Imaging Assembly is a set of four telescopes that employ\n normal-incidence, multi-layer coated optics to provide narrow-band imaging\n of the Sun. It provides high resolution full-disk images of the corona and\n transition region up to 0.5 solar radii above the solar limb with 1.5\n arcsecond angular resolution and 12-second temporal resolution. It observes\n the Sun in the following seven extreme ultraviolet bandpasses: 94 A\n (Fe XVIII), 131 A (Fe VIII, XXI), 171 A (Fe IX), 193 A (Fe XII, XXIV),\n 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes\n in the visible 1600 A (C IV) and the nearby continuum (1700 A).\n\n Notes\n -----\n Observer location: The standard AIA FITS header provides the spacecraft location in multiple\n coordinate systems, including Heliocentric Aries Ecliptic (HAE) and Heliographic Stonyhurst\n (HGS). 
SunPy uses the provided HAE coordinates due to accuracy concerns with the provided\n HGS coordinates, but other software packages may make different choices.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <https://aia.lmsal.com>`_\n * `Fits Header keywords <http://jsoc.stanford.edu/doc/keywords/AIA/AIA02840_A_AIA-SDO_FITS_Keyword_Documents.pdf>`_\n * `Analysis Guide <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/>`_\n * `Instrument Paper <https://doi.org/10.1007/s11207-011-9776-8>`_\n * `wavelengths and temperature response reference <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/figures/aia_tel_resp.png>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n if 'bunit' not in header and 'pixlunit' in header:\n # PIXLUNIT is not a FITS standard keyword\n header['bunit'] = header['pixlunit']\n\n super().__init__(data, header, **kwargs)\n\n # Fill in some missing info\n self.meta['detector'] = self.meta.get('detector', \"AIA\")\n self._nickname = self.detector\n self.plot_settings['cmap'] = self._get_cmap_name()\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, AsinhStretch(0.01)), clip=False)\n\n @property\n def _supported_observer_coordinates(self):\n return [(('haex_obs', 'haey_obs', 'haez_obs'), {'x': self.meta.get('haex_obs'),\n 'y': self.meta.get('haey_obs'),\n 'z': self.meta.get('haez_obs'),\n 'unit': u.m,\n 'representation_type': CartesianRepresentation,\n 'frame': HeliocentricMeanEcliptic})\n ] + super()._supported_observer_coordinates\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta.get('telescop', '').split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an AIA image\"\"\"\n return str(header.get('instrume', '')).startswith('AIA')\n\n\nclass HMIMap(GenericMap):\n \"\"\"HMI Image Map.\n\n HMI consists of a refracting telescope, a polarization selector,\n an image stabilization system, a narrow band tunable filter\n and two 4096 pixel CCD cameras. It observes the full solar disk in the Fe I\n absorption line at 6173 Angstrom with a resolution of 1 arc-second.\n HMI takes images in a sequence of tuning and polarizations at a 4-second\n cadence for each camera. 
One camera is dedicated to a 45 s Doppler and\n line-of-sight field sequence while the other to a 90 s vector field\n sequence.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <http://hmi.stanford.edu>`_\n * `Analysis Guide <http://hmi.stanford.edu/doc/magnetic/guide.pdf>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n super().__init__(data, header, **kwargs)\n\n self.meta['detector'] = self.meta.get('detector', \"HMI\")\n self._nickname = self.detector\n\n @property\n def measurement(self):\n \"\"\"\n Returns the measurement type.\n \"\"\"\n return self.meta.get('content', '').split(\" \")[0].lower()\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta.get('telescop', '').split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an HMI image\"\"\"\n return (str(header.get('TELESCOP', '')).endswith('HMI') and\n not HMISynopticMap.is_datasource_for(data, header))\n\n\nclass HMISynopticMap(HMIMap):\n\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n\n if self.meta['cunit1'] == 'Degree':\n self.meta['cunit1'] = 'deg'\n\n if self.meta['cunit2'] == 'Sine Latitude':\n log.debug(\"Editing CUNIT2, CDELT1, CDLET2 keywords to the correct \"\n \"values for a CEA projection.\")\n self.meta['cunit2'] = 'deg'\n\n # Since, this map uses the cylindrical equal-area (CEA) projection,\n # the spacing should be modified to 180/pi times the original value\n # Reference: Section 5.5, Thompson 2006\n self.meta['cdelt2'] = 180 / np.pi * self.meta['cdelt2']\n self.meta['cdelt1'] = np.abs(self.meta['cdelt1'])\n\n if 'date-obs' not in self.meta and 't_obs' in self.meta:\n log.debug('Setting \"DATE-OBS\" keyword from \"T_OBS\"')\n self.meta['date-obs'] = self.meta['t_obs']\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"\n Determines if header corresponds to an HMI synoptic map.\n \"\"\"\n return (str(header.get('TELESCOP', '')).endswith('HMI') and\n str(header.get('CONTENT', '')) ==\n 'Carrington Synoptic Chart Of Br Field')\n", "path": "sunpy/map/sources/sdo.py"}], "after_files": [{"content": "\"\"\"SDO Map subclass definitions\"\"\"\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.coordinates import CartesianRepresentation, HeliocentricMeanEcliptic\nfrom astropy.visualization import AsinhStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\n\nfrom sunpy import log\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\n\n__all__ = ['AIAMap', 'HMIMap', 'HMISynopticMap']\n\n\nclass AIAMap(GenericMap):\n \"\"\"AIA Image Map.\n\n The Atmospheric Imaging Assembly is a set of four telescopes that employ\n normal-incidence, multi-layer coated optics to provide narrow-band imaging\n of the Sun. It provides high resolution full-disk images of the corona and\n transition region up to 0.5 solar radii above the solar limb with 1.5\n arcsecond angular resolution and 12-second temporal resolution. It observes\n the Sun in the following seven extreme ultraviolet bandpasses: 94 A\n (Fe XVIII), 131 A (Fe VIII, XXI), 171 A (Fe IX), 193 A (Fe XII, XXIV),\n 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). 
One telescope observes\n in the visible 1600 A (C IV) and the nearby continuum (1700 A).\n\n Notes\n -----\n Observer location: The standard AIA FITS header provides the spacecraft location in multiple\n coordinate systems, including Heliocentric Aries Ecliptic (HAE) and Heliographic Stonyhurst\n (HGS). SunPy uses the provided HAE coordinates due to accuracy concerns with the provided\n HGS coordinates, but other software packages may make different choices.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <https://aia.lmsal.com>`_\n * `Fits Header keywords <http://jsoc.stanford.edu/doc/keywords/AIA/AIA02840_A_AIA-SDO_FITS_Keyword_Documents.pdf>`_\n * `Analysis Guide <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/>`_\n * `Instrument Paper <https://doi.org/10.1007/s11207-011-9776-8>`_\n * `wavelengths and temperature response reference <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/figures/aia_tel_resp.png>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n if 'bunit' not in header and 'pixlunit' in header:\n # PIXLUNIT is not a FITS standard keyword\n header['bunit'] = header['pixlunit']\n\n super().__init__(data, header, **kwargs)\n\n # Fill in some missing info\n self.meta['detector'] = self.meta.get('detector', \"AIA\")\n self._nickname = self.detector\n self.plot_settings['cmap'] = self._get_cmap_name()\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, AsinhStretch(0.01)), clip=False)\n\n @property\n def _supported_observer_coordinates(self):\n return [(('haex_obs', 'haey_obs', 'haez_obs'), {'x': self.meta.get('haex_obs'),\n 'y': self.meta.get('haey_obs'),\n 'z': self.meta.get('haez_obs'),\n 'unit': u.m,\n 'representation_type': CartesianRepresentation,\n 'frame': HeliocentricMeanEcliptic})\n ] + super()._supported_observer_coordinates\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta.get('telescop', '').split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an AIA image\"\"\"\n return str(header.get('instrume', '')).startswith('AIA')\n\n\nclass HMIMap(GenericMap):\n \"\"\"HMI Image Map.\n\n HMI consists of a refracting telescope, a polarization selector,\n an image stabilization system, a narrow band tunable filter\n and two 4096 pixel CCD cameras. It observes the full solar disk in the Fe I\n absorption line at 6173 Angstrom with a resolution of 1 arc-second.\n HMI takes images in a sequence of tuning and polarizations at a 4-second\n cadence for each camera. 
One camera is dedicated to a 45 s Doppler and\n line-of-sight field sequence while the other to a 90 s vector field\n sequence.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <http://hmi.stanford.edu>`_\n * `Analysis Guide <http://hmi.stanford.edu/doc/magnetic/guide.pdf>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n super().__init__(data, header, **kwargs)\n\n self.meta['detector'] = self.meta.get('detector', \"HMI\")\n self._nickname = self.detector\n\n @property\n def measurement(self):\n \"\"\"\n Returns the measurement type.\n \"\"\"\n return self.meta.get('content', '').split(\" \")[0].lower()\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta.get('telescop', '').split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an HMI image\"\"\"\n return (str(header.get('TELESCOP', '')).endswith('HMI') and\n not HMISynopticMap.is_datasource_for(data, header))\n\n\nclass HMISynopticMap(HMIMap):\n \"\"\"\n SDO/HMI Synoptic Map.\n\n Synoptic maps are constructed from HMI 720s line-of-sight magnetograms\n collected over a 27-day solar rotation.\n\n See `~sunpy.map.sources.sdo.HMIMap` for information on the HMI instrument.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`__\n * `JSOC's HMI Synoptic Charts <http://jsoc.stanford.edu/HMI/LOS_Synoptic_charts.html>`__\n \"\"\"\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n\n if self.meta['cunit1'] == 'Degree':\n self.meta['cunit1'] = 'deg'\n\n if self.meta['cunit2'] == 'Sine Latitude':\n log.debug(\"Editing CUNIT2, CDELT1, CDLET2 keywords to the correct \"\n \"values for a CEA projection.\")\n self.meta['cunit2'] = 'deg'\n\n # Since, this map uses the cylindrical equal-area (CEA) projection,\n # the spacing should be modified to 180/pi times the original value\n # Reference: Section 5.5, Thompson 2006\n self.meta['cdelt2'] = 180 / np.pi * self.meta['cdelt2']\n self.meta['cdelt1'] = np.abs(self.meta['cdelt1'])\n\n if 'date-obs' not in self.meta and 't_obs' in self.meta:\n log.debug('Setting \"DATE-OBS\" keyword from \"T_OBS\"')\n self.meta['date-obs'] = self.meta['t_obs']\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"\n Determines if header corresponds to an HMI synoptic map.\n \"\"\"\n return (str(header.get('TELESCOP', '')).endswith('HMI') and\n str(header.get('CONTENT', '')) ==\n 'Carrington Synoptic Chart Of Br Field')\n", "path": "sunpy/map/sources/sdo.py"}]} | 2,500 | 233 |
gh_patches_debug_20315 | rasdani/github-patches | git_diff | Qiskit__qiskit-2302 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multi-language documentation
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected behavior?
I would like to remove all the multi-language documentation from this repository and add it to github.com/Qiskit/qiskit. I am not sure of the best way we do it and I let all discuss in that repository how we should do it. But we are not going to have documentation in the individual elements (only a readme about the element, contributing guidelines and other community files)
@alfrisch could you take the lead on the German
@rraymondhp could you take the lead on the Japanese
@hanheepaik could you take the lead on the Korean
@liupibm could you take the lead on the Chinese
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/de/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 """
4 Language specific configuration file, inheriting from the main /doc
5 conf.py file and adjusting the variables that depend on the language.
6 """
7
8 import os
9 import sys
10
11 sys.path.insert(0, os.path.abspath('..'))
12 from conf import *
13
14 language = 'de'
15 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']
16
17 templates_path = ['../_templates']
18 html_static_path = ['../theme/static/']
19 html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'
20 html_favicon = '../theme/static/favicon.ico'
21
```
Path: `docs/ja/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 """
4 Language specific configuration file, inheriting from the main /doc
5 conf.py file and adjusting the variables that depend on the language.
6 """
7
8 import os
9 import sys
10
11 sys.path.insert(0, os.path.abspath('..'))
12 from conf import *
13
14 language = 'ja'
15 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']
16
17 templates_path = ['../_templates']
18 html_static_path = ['../theme/static/']
19 html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'
20 html_favicon = '../theme/static/favicon.ico'
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/de/conf.py b/docs/de/conf.py
deleted file mode 100644
--- a/docs/de/conf.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Language specific configuration file, inheriting from the main /doc
-conf.py file and adjusting the variables that depend on the language.
-"""
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath('..'))
-from conf import *
-
-language = 'de'
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']
-
-templates_path = ['../_templates']
-html_static_path = ['../theme/static/']
-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'
-html_favicon = '../theme/static/favicon.ico'
diff --git a/docs/ja/conf.py b/docs/ja/conf.py
deleted file mode 100644
--- a/docs/ja/conf.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Language specific configuration file, inheriting from the main /doc
-conf.py file and adjusting the variables that depend on the language.
-"""
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath('..'))
-from conf import *
-
-language = 'ja'
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']
-
-templates_path = ['../_templates']
-html_static_path = ['../theme/static/']
-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'
-html_favicon = '../theme/static/favicon.ico'
| {"golden_diff": "diff --git a/docs/de/conf.py b/docs/de/conf.py\ndeleted file mode 100644\n--- a/docs/de/conf.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#!/usr/bin/env python3\n-# -*- coding: utf-8 -*-\n-\"\"\"\n-Language specific configuration file, inheriting from the main /doc\n-conf.py file and adjusting the variables that depend on the language.\n-\"\"\"\n-\n-import os\n-import sys\n-\n-sys.path.insert(0, os.path.abspath('..'))\n-from conf import *\n-\n-language = 'de'\n-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n-\n-templates_path = ['../_templates']\n-html_static_path = ['../theme/static/']\n-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\n-html_favicon = '../theme/static/favicon.ico'\ndiff --git a/docs/ja/conf.py b/docs/ja/conf.py\ndeleted file mode 100644\n--- a/docs/ja/conf.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#!/usr/bin/env python3\n-# -*- coding: utf-8 -*-\n-\"\"\"\n-Language specific configuration file, inheriting from the main /doc\n-conf.py file and adjusting the variables that depend on the language.\n-\"\"\"\n-\n-import os\n-import sys\n-\n-sys.path.insert(0, os.path.abspath('..'))\n-from conf import *\n-\n-language = 'ja'\n-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n-\n-templates_path = ['../_templates']\n-html_static_path = ['../theme/static/']\n-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\n-html_favicon = '../theme/static/favicon.ico'\n", "issue": "multi-language documentation \n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected behavior?\r\nI would like to remove all the multi-language documentation from this repository and add it to github.com/Qiskit/qiskit. I am not sure of the best way we do it and I let all discuss in that repository how we should do it. 
But we are not going to have documentation in the individual elements (only a readme about the element, contributing guidelines and other community files)\r\n\r\n@alfrisch could you take the lead on the German\r\n@rraymondhp could you take the lead on the Japanese\r\n@hanheepaik could you take the lead on the Korean\r\n@liupibm could you take the lead on the Chinese\r\n\r\nThanks \r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLanguage specific configuration file, inheriting from the main /doc\nconf.py file and adjusting the variables that depend on the language.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\nfrom conf import *\n\nlanguage = 'de'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n\ntemplates_path = ['../_templates']\nhtml_static_path = ['../theme/static/']\nhtml_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\nhtml_favicon = '../theme/static/favicon.ico'\n", "path": "docs/de/conf.py"}, {"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLanguage specific configuration file, inheriting from the main /doc\nconf.py file and adjusting the variables that depend on the language.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\nfrom conf import *\n\nlanguage = 'ja'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n\ntemplates_path = ['../_templates']\nhtml_static_path = ['../theme/static/']\nhtml_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\nhtml_favicon = '../theme/static/favicon.ico'\n", "path": "docs/ja/conf.py"}], "after_files": [{"content": null, "path": "docs/de/conf.py"}, {"content": null, "path": "docs/ja/conf.py"}]} | 814 | 395 |
gh_patches_debug_43406 | rasdani/github-patches | git_diff | sublimelsp__LSP-707 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reduce impact of LSP color listener by default.
@predragnikolic: Noticed while closing views after find-replace in pyls:
```
Traceback (most recent call last):
File "/Applications/Sublime Text.app/Contents/MacOS/sublime_plugin.py", line 506, in run_async_view_listener_callback
vel.__class__.__dict__[name](vel)
File "/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py", line 47, in on_activated_async
self.schedule_request()
File "/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py", line 53, in schedule_request
current_point = self.view.sel()[0].begin()
File "/Applications/Sublime Text.app/Contents/MacOS/sublime.py", line 649, in __getitem__
raise IndexError()
IndexError
```
* We should probably fix the above issue (maybe it's rare but I ran into this edge case pretty quickly)
* We could avoid creating ViewEventListeners for a few kinds of irrelevant views: Read-only, transient, unsupported syntaxes ( Maybe `DocumentSyncListener` `is_applicable` is suitable?)
* We could avoid running `LspColorListener`'s `schedule_request` until a session with colorProvider for a given view has been found.
--- END ISSUE ---
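To make the first two bullets concrete, here is a hedged sketch (not the project's actual fix) of how `schedule_request` could tolerate an empty selection and how `is_applicable` could skip views with unsupported syntaxes. The class and attribute names follow the plugin code quoted below; `is_supported_syntax` and `client_configs` are assumed to come from LSP's `core` modules and should be treated as assumptions:

```python
import sublime
import sublime_plugin

from .core.configurations import is_supported_syntax  # assumed core helper
from .core.settings import settings, client_configs   # assumed core settings


class LspColorListener(sublime_plugin.ViewEventListener):
    @classmethod
    def is_applicable(cls, _settings):
        # Skip views whose syntax no configured language client supports,
        # in addition to the existing disabled-capability check.
        syntax = _settings.get('syntax')
        if not syntax or not is_supported_syntax(syntax, client_configs.all):
            return False
        return 'colorProvider' not in settings.disabled_capabilities

    def schedule_request(self):
        sel = self.view.sel()
        if len(sel) < 1:
            # The view has no selection (e.g. it is being closed):
            # bail out instead of raising IndexError on sel()[0].
            return
        current_point = sel[0].begin()
        if self._stored_point != current_point:
            self._stored_point = current_point
            sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)
```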
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/color.py`
Content:
```
1 import sublime_plugin
2 import sublime
3
4 try:
5 from typing import Any, List, Dict, Callable, Optional
6 assert Any and List and Dict and Callable and Optional
7 except ImportError:
8 pass
9
10 from .core.protocol import Request
11 from .core.url import filename_to_uri
12 from .core.registry import session_for_view
13 from .core.settings import settings
14 from .core.views import range_to_region
15 from .core.protocol import Range
16
17
18 def send_color_request(view, on_response_recieved: 'Callable'):
19 session = session_for_view(view)
20 if not session or not session.has_capability('colorProvider'):
21 # the server doesn't support colors, just return
22 return
23
24 params = {
25 "textDocument": {
26 "uri": filename_to_uri(view.file_name())
27 }
28 }
29 session.client.send_request(
30 Request.documentColor(params),
31 lambda response: on_response_recieved(response))
32
33
34 class LspColorListener(sublime_plugin.ViewEventListener):
35 def __init__(self, view: sublime.View) -> None:
36 super().__init__(view)
37 self.color_phantom_set = None # type: Optional[sublime.PhantomSet]
38 self._stored_point = -1
39
40 @classmethod
41 def is_applicable(cls, _settings):
42 return 'colorProvider' not in settings.disabled_capabilities
43
44 def on_activated_async(self):
45 self.schedule_request()
46
47 def on_modified_async(self):
48 self.schedule_request()
49
50 def schedule_request(self):
51 current_point = self.view.sel()[0].begin()
52 if self._stored_point != current_point:
53 self._stored_point = current_point
54 sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)
55
56 def fire_request(self, current_point: int) -> None:
57 if current_point == self._stored_point:
58 send_color_request(self.view, self.handle_response)
59
60 def handle_response(self, response) -> None:
61 phantoms = []
62 for val in response:
63 color = val['color']
64 red = color['red'] * 255
65 green = color['green'] * 255
66 blue = color['blue'] * 255
67 alpha = color['alpha']
68
69 content = """
70 <div style='padding: 0.4em;
71 margin-top: 0.1em;
72 border: 1px solid color(var(--foreground) alpha(0.25));
73 background-color: rgba({}, {}, {}, {})'>
74 </div>""".format(red, green, blue, alpha)
75
76 range = Range.from_lsp(val['range'])
77 region = range_to_region(range, self.view)
78
79 phantoms.append(sublime.Phantom(region, content, sublime.LAYOUT_INLINE))
80
81 if phantoms:
82 if not self.color_phantom_set:
83 self.color_phantom_set = sublime.PhantomSet(self.view, "lsp_color")
84 self.color_phantom_set.update(phantoms)
85 else:
86 self.color_phantom_set = None
87
88
89 def remove_color_boxes(view):
90 view.erase_phantoms('lsp_color')
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/color.py b/plugin/color.py
--- a/plugin/color.py
+++ b/plugin/color.py
@@ -9,26 +9,12 @@
from .core.protocol import Request
from .core.url import filename_to_uri
-from .core.registry import session_for_view
-from .core.settings import settings
+from .core.registry import session_for_view, config_for_scope
+from .core.settings import settings, client_configs
from .core.views import range_to_region
from .core.protocol import Range
-
-
-def send_color_request(view, on_response_recieved: 'Callable'):
- session = session_for_view(view)
- if not session or not session.has_capability('colorProvider'):
- # the server doesn't support colors, just return
- return
-
- params = {
- "textDocument": {
- "uri": filename_to_uri(view.file_name())
- }
- }
- session.client.send_request(
- Request.documentColor(params),
- lambda response: on_response_recieved(response))
+from .core.configurations import is_supported_syntax
+from .core.documents import is_transient_view
class LspColorListener(sublime_plugin.ViewEventListener):
@@ -36,26 +22,72 @@
super().__init__(view)
self.color_phantom_set = None # type: Optional[sublime.PhantomSet]
self._stored_point = -1
+ self.initialized = False
+ self.enabled = False
@classmethod
def is_applicable(cls, _settings):
- return 'colorProvider' not in settings.disabled_capabilities
+ syntax = _settings.get('syntax')
+ is_supported = syntax and is_supported_syntax(syntax, client_configs.all)
+ disabled_by_user = 'colorProvider' in settings.disabled_capabilities
+ return is_supported and not disabled_by_user
def on_activated_async(self):
- self.schedule_request()
+ if not self.initialized:
+ self.initialize()
+
+ def initialize(self, is_retry=False):
+ config = config_for_scope(self.view)
+ if not config:
+ self.initialized = True # no server enabled, re-open file to activate feature.
+
+ session = session_for_view(self.view)
+ if session:
+ self.initialized = True
+ self.enabled = session.has_capability('colorProvider')
+ if self.enabled:
+ self.send_color_request()
+ elif not is_retry:
+ # session may be starting, try again once in a second.
+ sublime.set_timeout_async(lambda: self.initialize(is_retry=True), 1000)
+ else:
+ self.initialized = True # we retried but still no session available.
def on_modified_async(self):
- self.schedule_request()
+ if self.enabled:
+ self.schedule_request()
def schedule_request(self):
- current_point = self.view.sel()[0].begin()
+ sel = self.view.sel()
+ if len(sel) < 1:
+ return
+
+ current_point = sel[0].begin()
if self._stored_point != current_point:
self._stored_point = current_point
sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)
def fire_request(self, current_point: int) -> None:
if current_point == self._stored_point:
- send_color_request(self.view, self.handle_response)
+ self.send_color_request()
+
+ def send_color_request(self):
+ if is_transient_view(self.view):
+ return
+
+ session = session_for_view(self.view)
+ if not session:
+ return
+
+ params = {
+ "textDocument": {
+ "uri": filename_to_uri(self.view.file_name())
+ }
+ }
+ session.client.send_request(
+ Request.documentColor(params),
+ self.handle_response
+ )
def handle_response(self, response) -> None:
phantoms = []
@@ -68,7 +100,7 @@
content = """
<div style='padding: 0.4em;
- margin-top: 0.1em;
+ margin-top: 0.2em;
border: 1px solid color(var(--foreground) alpha(0.25));
background-color: rgba({}, {}, {}, {})'>
</div>""".format(red, green, blue, alpha)
| {"golden_diff": "diff --git a/plugin/color.py b/plugin/color.py\n--- a/plugin/color.py\n+++ b/plugin/color.py\n@@ -9,26 +9,12 @@\n \n from .core.protocol import Request\n from .core.url import filename_to_uri\n-from .core.registry import session_for_view\n-from .core.settings import settings\n+from .core.registry import session_for_view, config_for_scope\n+from .core.settings import settings, client_configs\n from .core.views import range_to_region\n from .core.protocol import Range\n-\n-\n-def send_color_request(view, on_response_recieved: 'Callable'):\n- session = session_for_view(view)\n- if not session or not session.has_capability('colorProvider'):\n- # the server doesn't support colors, just return\n- return\n-\n- params = {\n- \"textDocument\": {\n- \"uri\": filename_to_uri(view.file_name())\n- }\n- }\n- session.client.send_request(\n- Request.documentColor(params),\n- lambda response: on_response_recieved(response))\n+from .core.configurations import is_supported_syntax\n+from .core.documents import is_transient_view\n \n \n class LspColorListener(sublime_plugin.ViewEventListener):\n@@ -36,26 +22,72 @@\n super().__init__(view)\n self.color_phantom_set = None # type: Optional[sublime.PhantomSet]\n self._stored_point = -1\n+ self.initialized = False\n+ self.enabled = False\n \n @classmethod\n def is_applicable(cls, _settings):\n- return 'colorProvider' not in settings.disabled_capabilities\n+ syntax = _settings.get('syntax')\n+ is_supported = syntax and is_supported_syntax(syntax, client_configs.all)\n+ disabled_by_user = 'colorProvider' in settings.disabled_capabilities\n+ return is_supported and not disabled_by_user\n \n def on_activated_async(self):\n- self.schedule_request()\n+ if not self.initialized:\n+ self.initialize()\n+\n+ def initialize(self, is_retry=False):\n+ config = config_for_scope(self.view)\n+ if not config:\n+ self.initialized = True # no server enabled, re-open file to activate feature.\n+\n+ session = session_for_view(self.view)\n+ if session:\n+ self.initialized = True\n+ self.enabled = session.has_capability('colorProvider')\n+ if self.enabled:\n+ self.send_color_request()\n+ elif not is_retry:\n+ # session may be starting, try again once in a second.\n+ sublime.set_timeout_async(lambda: self.initialize(is_retry=True), 1000)\n+ else:\n+ self.initialized = True # we retried but still no session available.\n \n def on_modified_async(self):\n- self.schedule_request()\n+ if self.enabled:\n+ self.schedule_request()\n \n def schedule_request(self):\n- current_point = self.view.sel()[0].begin()\n+ sel = self.view.sel()\n+ if len(sel) < 1:\n+ return\n+\n+ current_point = sel[0].begin()\n if self._stored_point != current_point:\n self._stored_point = current_point\n sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)\n \n def fire_request(self, current_point: int) -> None:\n if current_point == self._stored_point:\n- send_color_request(self.view, self.handle_response)\n+ self.send_color_request()\n+\n+ def send_color_request(self):\n+ if is_transient_view(self.view):\n+ return\n+\n+ session = session_for_view(self.view)\n+ if not session:\n+ return\n+\n+ params = {\n+ \"textDocument\": {\n+ \"uri\": filename_to_uri(self.view.file_name())\n+ }\n+ }\n+ session.client.send_request(\n+ Request.documentColor(params),\n+ self.handle_response\n+ )\n \n def handle_response(self, response) -> None:\n phantoms = []\n@@ -68,7 +100,7 @@\n \n content = \"\"\"\n <div style='padding: 0.4em;\n- margin-top: 0.1em;\n+ margin-top: 0.2em;\n border: 1px solid 
color(var(--foreground) alpha(0.25));\n background-color: rgba({}, {}, {}, {})'>\n </div>\"\"\".format(red, green, blue, alpha)\n", "issue": "Reduce impact of LSP color listener by default.\n@predragnikolic: Noticed while closing views after find-replace in pyls:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Applications/Sublime Text.app/Contents/MacOS/sublime_plugin.py\", line 506, in run_async_view_listener_callback\r\n vel.__class__.__dict__[name](vel)\r\n File \"/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py\", line 47, in on_activated_async\r\n self.schedule_request()\r\n File \"/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py\", line 53, in schedule_request\r\n current_point = self.view.sel()[0].begin()\r\n File \"/Applications/Sublime Text.app/Contents/MacOS/sublime.py\", line 649, in __getitem__\r\n raise IndexError()\r\nIndexError\r\n```\r\n\r\n* We should probably fix the above issue (maybe it's rare but I ran into this edge case pretty quickly)\r\n* We could avoid creating ViewEventListeners for a few kinds of irrelevant views: Read-only, transient, unsupported syntaxes ( Maybe `DocumentSyncListener` `is_applicable` is suitable?)\r\n* We could avoid running `LspColorListener`'s `schedule_request` until a session with colorProvider for a given view has been found.\r\n\n", "before_files": [{"content": "import sublime_plugin\nimport sublime\n\ntry:\n from typing import Any, List, Dict, Callable, Optional\n assert Any and List and Dict and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.protocol import Request\nfrom .core.url import filename_to_uri\nfrom .core.registry import session_for_view\nfrom .core.settings import settings\nfrom .core.views import range_to_region\nfrom .core.protocol import Range\n\n\ndef send_color_request(view, on_response_recieved: 'Callable'):\n session = session_for_view(view)\n if not session or not session.has_capability('colorProvider'):\n # the server doesn't support colors, just return\n return\n\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(view.file_name())\n }\n }\n session.client.send_request(\n Request.documentColor(params),\n lambda response: on_response_recieved(response))\n\n\nclass LspColorListener(sublime_plugin.ViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.color_phantom_set = None # type: Optional[sublime.PhantomSet]\n self._stored_point = -1\n\n @classmethod\n def is_applicable(cls, _settings):\n return 'colorProvider' not in settings.disabled_capabilities\n\n def on_activated_async(self):\n self.schedule_request()\n\n def on_modified_async(self):\n self.schedule_request()\n\n def schedule_request(self):\n current_point = self.view.sel()[0].begin()\n if self._stored_point != current_point:\n self._stored_point = current_point\n sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)\n\n def fire_request(self, current_point: int) -> None:\n if current_point == self._stored_point:\n send_color_request(self.view, self.handle_response)\n\n def handle_response(self, response) -> None:\n phantoms = []\n for val in response:\n color = val['color']\n red = color['red'] * 255\n green = color['green'] * 255\n blue = color['blue'] * 255\n alpha = color['alpha']\n\n content = \"\"\"\n <div style='padding: 0.4em;\n margin-top: 0.1em;\n border: 1px solid color(var(--foreground) alpha(0.25));\n background-color: rgba({}, {}, {}, {})'>\n </div>\"\"\".format(red, 
green, blue, alpha)\n\n range = Range.from_lsp(val['range'])\n region = range_to_region(range, self.view)\n\n phantoms.append(sublime.Phantom(region, content, sublime.LAYOUT_INLINE))\n\n if phantoms:\n if not self.color_phantom_set:\n self.color_phantom_set = sublime.PhantomSet(self.view, \"lsp_color\")\n self.color_phantom_set.update(phantoms)\n else:\n self.color_phantom_set = None\n\n\ndef remove_color_boxes(view):\n view.erase_phantoms('lsp_color')\n", "path": "plugin/color.py"}], "after_files": [{"content": "import sublime_plugin\nimport sublime\n\ntry:\n from typing import Any, List, Dict, Callable, Optional\n assert Any and List and Dict and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.protocol import Request\nfrom .core.url import filename_to_uri\nfrom .core.registry import session_for_view, config_for_scope\nfrom .core.settings import settings, client_configs\nfrom .core.views import range_to_region\nfrom .core.protocol import Range\nfrom .core.configurations import is_supported_syntax\nfrom .core.documents import is_transient_view\n\n\nclass LspColorListener(sublime_plugin.ViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.color_phantom_set = None # type: Optional[sublime.PhantomSet]\n self._stored_point = -1\n self.initialized = False\n self.enabled = False\n\n @classmethod\n def is_applicable(cls, _settings):\n syntax = _settings.get('syntax')\n is_supported = syntax and is_supported_syntax(syntax, client_configs.all)\n disabled_by_user = 'colorProvider' in settings.disabled_capabilities\n return is_supported and not disabled_by_user\n\n def on_activated_async(self):\n if not self.initialized:\n self.initialize()\n\n def initialize(self, is_retry=False):\n config = config_for_scope(self.view)\n if not config:\n self.initialized = True # no server enabled, re-open file to activate feature.\n\n session = session_for_view(self.view)\n if session:\n self.initialized = True\n self.enabled = session.has_capability('colorProvider')\n if self.enabled:\n self.send_color_request()\n elif not is_retry:\n # session may be starting, try again once in a second.\n sublime.set_timeout_async(lambda: self.initialize(is_retry=True), 1000)\n else:\n self.initialized = True # we retried but still no session available.\n\n def on_modified_async(self):\n if self.enabled:\n self.schedule_request()\n\n def schedule_request(self):\n sel = self.view.sel()\n if len(sel) < 1:\n return\n\n current_point = sel[0].begin()\n if self._stored_point != current_point:\n self._stored_point = current_point\n sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)\n\n def fire_request(self, current_point: int) -> None:\n if current_point == self._stored_point:\n self.send_color_request()\n\n def send_color_request(self):\n if is_transient_view(self.view):\n return\n\n session = session_for_view(self.view)\n if not session:\n return\n\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.view.file_name())\n }\n }\n session.client.send_request(\n Request.documentColor(params),\n self.handle_response\n )\n\n def handle_response(self, response) -> None:\n phantoms = []\n for val in response:\n color = val['color']\n red = color['red'] * 255\n green = color['green'] * 255\n blue = color['blue'] * 255\n alpha = color['alpha']\n\n content = \"\"\"\n <div style='padding: 0.4em;\n margin-top: 0.2em;\n border: 1px solid color(var(--foreground) alpha(0.25));\n background-color: rgba({}, {}, {}, {})'>\n </div>\"\"\".format(red, 
green, blue, alpha)\n\n range = Range.from_lsp(val['range'])\n region = range_to_region(range, self.view)\n\n phantoms.append(sublime.Phantom(region, content, sublime.LAYOUT_INLINE))\n\n if phantoms:\n if not self.color_phantom_set:\n self.color_phantom_set = sublime.PhantomSet(self.view, \"lsp_color\")\n self.color_phantom_set.update(phantoms)\n else:\n self.color_phantom_set = None\n\n\ndef remove_color_boxes(view):\n view.erase_phantoms('lsp_color')\n", "path": "plugin/color.py"}]} | 1,393 | 959 |
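For context on the record above: both the original `plugin/color.py` and the patched version rely on a small debounce so that document-color requests are only sent once the caret has been idle for a moment. A minimal standalone sketch of that pattern, assuming only the Sublime Text plugin API (the class name and the print placeholder are illustrative, not part of the patch):

```python
import sublime
import sublime_plugin


class DebouncedRequestListener(sublime_plugin.ViewEventListener):
    """Illustrative debounce: act only once the caret has stayed put for 800 ms."""

    def __init__(self, view):
        super().__init__(view)
        self._stored_point = -1

    def on_modified_async(self):
        sel = self.view.sel()
        if len(sel) < 1:  # the guard the patch adds to avoid IndexError on empty selections
            return
        current_point = sel[0].begin()
        if self._stored_point != current_point:
            self._stored_point = current_point
            sublime.set_timeout_async(lambda: self._fire(current_point), 800)

    def _fire(self, point):
        # Only the most recently scheduled call still matches the stored point.
        if point == self._stored_point:
            print("caret settled at", point)  # placeholder for the real LSP request
```

Capturing `current_point` in the closure and re-checking it against the stored value is what collapses a burst of edits into a single request.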
gh_patches_debug_21965 | rasdani/github-patches | git_diff | apache__tvm-10188 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] RPC Server Can't Serve Through Proxy Because of Missing Command Line Option
Now we have the RPC server, proxy, and tracker, so if the host machine can't access the RPC server directly, we can put an RPC proxy between the host machine and the RPC server, like "Host --> RPC Proxy --> RPC Server". We can do this from the Python API, but not through the command line, because "tvm.exec.rpc_server" hasn't exposed the parameter "is_proxy" of class "tvm.rpc.server.Server" as a command line option.
### Expected behavior
Can register the RPC server to RPC proxy from command line like something below.
```shell
python -m tvm.exec.rpc_server --host 0.0.0.0 --port 9090 --through-proxy
```
### Actual behavior
Currently there isn't any command line option to set the parameter "is_proxy".
### Environment
No
### Steps to reproduce
Any
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/tvm/exec/rpc_server.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 # pylint: disable=redefined-outer-name, invalid-name
18 """Start an RPC server"""
19 import argparse
20 import logging
21 from .. import rpc
22
23
24 def main(args):
25 """Main function
26
27 Parameters
28 ----------
29 args : argparse.Namespace
30 parsed args from command-line invocation
31 """
32 if args.tracker:
33 url, port = args.tracker.rsplit(":", 1)
34 port = int(port)
35 tracker_addr = (url, port)
36 if not args.key:
37 raise RuntimeError("Need key to present type of resource when tracker is available")
38 else:
39 tracker_addr = None
40
41 server = rpc.Server(
42 args.host,
43 args.port,
44 args.port_end,
45 key=args.key,
46 tracker_addr=tracker_addr,
47 load_library=args.load_library,
48 custom_addr=args.custom_addr,
49 silent=args.silent,
50 no_fork=not args.fork,
51 )
52 server.proc.join()
53
54
55 if __name__ == "__main__":
56 parser = argparse.ArgumentParser()
57 parser.add_argument(
58 "--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
59 )
60 parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
61 parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
62 parser.add_argument(
63 "--tracker",
64 type=str,
65 help=("The address of RPC tracker in host:port format. " "e.g. (10.77.1.234:9190)"),
66 )
67 parser.add_argument(
68 "--key", type=str, default="", help="The key used to identify the device type in tracker."
69 )
70 parser.add_argument("--silent", action="store_true", help="Whether run in silent mode.")
71 parser.add_argument("--load-library", type=str, help="Additional library to load")
72 parser.add_argument(
73 "--no-fork",
74 dest="fork",
75 action="store_false",
76 help="Use spawn mode to avoid fork. This option \
77 is able to avoid potential fork problems with Metal, OpenCL \
78 and ROCM compilers.",
79 )
80 parser.add_argument(
81 "--custom-addr", type=str, help="Custom IP Address to Report to RPC Tracker"
82 )
83
84 parser.set_defaults(fork=True)
85 args = parser.parse_args()
86 logging.basicConfig(level=logging.INFO)
87 if not args.fork is False and not args.silent:
88 logging.info(
89 "If you are running ROCM/Metal, fork will cause "
90 "compiler internal error. Try to launch with arg ```--no-fork```"
91 )
92 main(args)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/tvm/exec/rpc_server.py b/python/tvm/exec/rpc_server.py
--- a/python/tvm/exec/rpc_server.py
+++ b/python/tvm/exec/rpc_server.py
@@ -42,6 +42,7 @@
args.host,
args.port,
args.port_end,
+ is_proxy=args.through_proxy,
key=args.key,
tracker_addr=tracker_addr,
load_library=args.load_library,
@@ -58,6 +59,15 @@
"--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
)
parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
+ parser.add_argument(
+ "--through-proxy",
+ dest="through_proxy",
+ action="store_true",
+ help=(
+ "Whether this server provide service through a proxy. If this is true, the host and"
+ "port actually is the address of the proxy."
+ ),
+ )
parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
parser.add_argument(
"--tracker",
| {"golden_diff": "diff --git a/python/tvm/exec/rpc_server.py b/python/tvm/exec/rpc_server.py\n--- a/python/tvm/exec/rpc_server.py\n+++ b/python/tvm/exec/rpc_server.py\n@@ -42,6 +42,7 @@\n args.host,\n args.port,\n args.port_end,\n+ is_proxy=args.through_proxy,\n key=args.key,\n tracker_addr=tracker_addr,\n load_library=args.load_library,\n@@ -58,6 +59,15 @@\n \"--host\", type=str, default=\"0.0.0.0\", help=\"The host IP address the tracker binds to\"\n )\n parser.add_argument(\"--port\", type=int, default=9090, help=\"The port of the RPC\")\n+ parser.add_argument(\n+ \"--through-proxy\",\n+ dest=\"through_proxy\",\n+ action=\"store_true\",\n+ help=(\n+ \"Whether this server provide service through a proxy. If this is true, the host and\"\n+ \"port actually is the address of the proxy.\"\n+ ),\n+ )\n parser.add_argument(\"--port-end\", type=int, default=9199, help=\"The end search port of the RPC\")\n parser.add_argument(\n \"--tracker\",\n", "issue": "[Bug] RPC Server Can't Serve Through Proxy Because of Missing Command Line Option\nNow we have RPC server, proxy, and tracker, so if the host machine can't access the RPC server directly, then we can put RPC proxy between host machine and RPC server like \"Host --> RPC Proxy --> RPC Server\", we can do it from Python API, but we can't do it through command line. Because \"tvm.exec.rpc_server\" haven't exposed the parameter \"is_proxy\" of class \"tvm.rpc.server.Server\" through command line option.\r\n\r\n### Expected behavior\r\n\r\nCan register the RPC server to RPC proxy from command line like something below.\r\n```shell\r\npython -m tvm.exec.rpc_server --host 0.0.0.0 --port 9090 --through-proxy\r\n```\r\n\r\n### Actual behavior\r\n\r\nCurrently haven't any command line option to set the parameter \"is_proxy\".\r\n\r\n### Environment\r\n\r\nNo\r\n\r\n### Steps to reproduce\r\n\r\nAny\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=redefined-outer-name, invalid-name\n\"\"\"Start an RPC server\"\"\"\nimport argparse\nimport logging\nfrom .. 
import rpc\n\n\ndef main(args):\n \"\"\"Main function\n\n Parameters\n ----------\n args : argparse.Namespace\n parsed args from command-line invocation\n \"\"\"\n if args.tracker:\n url, port = args.tracker.rsplit(\":\", 1)\n port = int(port)\n tracker_addr = (url, port)\n if not args.key:\n raise RuntimeError(\"Need key to present type of resource when tracker is available\")\n else:\n tracker_addr = None\n\n server = rpc.Server(\n args.host,\n args.port,\n args.port_end,\n key=args.key,\n tracker_addr=tracker_addr,\n load_library=args.load_library,\n custom_addr=args.custom_addr,\n silent=args.silent,\n no_fork=not args.fork,\n )\n server.proc.join()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--host\", type=str, default=\"0.0.0.0\", help=\"The host IP address the tracker binds to\"\n )\n parser.add_argument(\"--port\", type=int, default=9090, help=\"The port of the RPC\")\n parser.add_argument(\"--port-end\", type=int, default=9199, help=\"The end search port of the RPC\")\n parser.add_argument(\n \"--tracker\",\n type=str,\n help=(\"The address of RPC tracker in host:port format. \" \"e.g. (10.77.1.234:9190)\"),\n )\n parser.add_argument(\n \"--key\", type=str, default=\"\", help=\"The key used to identify the device type in tracker.\"\n )\n parser.add_argument(\"--silent\", action=\"store_true\", help=\"Whether run in silent mode.\")\n parser.add_argument(\"--load-library\", type=str, help=\"Additional library to load\")\n parser.add_argument(\n \"--no-fork\",\n dest=\"fork\",\n action=\"store_false\",\n help=\"Use spawn mode to avoid fork. This option \\\n is able to avoid potential fork problems with Metal, OpenCL \\\n and ROCM compilers.\",\n )\n parser.add_argument(\n \"--custom-addr\", type=str, help=\"Custom IP Address to Report to RPC Tracker\"\n )\n\n parser.set_defaults(fork=True)\n args = parser.parse_args()\n logging.basicConfig(level=logging.INFO)\n if not args.fork is False and not args.silent:\n logging.info(\n \"If you are running ROCM/Metal, fork will cause \"\n \"compiler internal error. Try to launch with arg ```--no-fork```\"\n )\n main(args)\n", "path": "python/tvm/exec/rpc_server.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=redefined-outer-name, invalid-name\n\"\"\"Start an RPC server\"\"\"\nimport argparse\nimport logging\nfrom .. 
import rpc\n\n\ndef main(args):\n \"\"\"Main function\n\n Parameters\n ----------\n args : argparse.Namespace\n parsed args from command-line invocation\n \"\"\"\n if args.tracker:\n url, port = args.tracker.rsplit(\":\", 1)\n port = int(port)\n tracker_addr = (url, port)\n if not args.key:\n raise RuntimeError(\"Need key to present type of resource when tracker is available\")\n else:\n tracker_addr = None\n\n server = rpc.Server(\n args.host,\n args.port,\n args.port_end,\n is_proxy=args.through_proxy,\n key=args.key,\n tracker_addr=tracker_addr,\n load_library=args.load_library,\n custom_addr=args.custom_addr,\n silent=args.silent,\n no_fork=not args.fork,\n )\n server.proc.join()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--host\", type=str, default=\"0.0.0.0\", help=\"The host IP address the tracker binds to\"\n )\n parser.add_argument(\"--port\", type=int, default=9090, help=\"The port of the RPC\")\n parser.add_argument(\n \"--through-proxy\",\n dest=\"through_proxy\",\n action=\"store_true\",\n help=(\n \"Whether this server provide service through a proxy. If this is true, the host and\"\n \"port actually is the address of the proxy.\"\n ),\n )\n parser.add_argument(\"--port-end\", type=int, default=9199, help=\"The end search port of the RPC\")\n parser.add_argument(\n \"--tracker\",\n type=str,\n help=(\"The address of RPC tracker in host:port format. \" \"e.g. (10.77.1.234:9190)\"),\n )\n parser.add_argument(\n \"--key\", type=str, default=\"\", help=\"The key used to identify the device type in tracker.\"\n )\n parser.add_argument(\"--silent\", action=\"store_true\", help=\"Whether run in silent mode.\")\n parser.add_argument(\"--load-library\", type=str, help=\"Additional library to load\")\n parser.add_argument(\n \"--no-fork\",\n dest=\"fork\",\n action=\"store_false\",\n help=\"Use spawn mode to avoid fork. This option \\\n is able to avoid potential fork problems with Metal, OpenCL \\\n and ROCM compilers.\",\n )\n parser.add_argument(\n \"--custom-addr\", type=str, help=\"Custom IP Address to Report to RPC Tracker\"\n )\n\n parser.set_defaults(fork=True)\n args = parser.parse_args()\n logging.basicConfig(level=logging.INFO)\n if not args.fork is False and not args.silent:\n logging.info(\n \"If you are running ROCM/Metal, fork will cause \"\n \"compiler internal error. Try to launch with arg ```--no-fork```\"\n )\n main(args)\n", "path": "python/tvm/exec/rpc_server.py"}]} | 1,397 | 271 |
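A usage sketch for the record above, assuming the patched `tvm.exec.rpc_server` (or the underlying API it forwards to). When going through a proxy, the host and port are the proxy's address per the new help text; the hostname and key below are placeholders:

```python
# Python-API equivalent of:
#   python -m tvm.exec.rpc_server --host <proxy-ip> --port 9090 --through-proxy --key example-key
from tvm import rpc

server = rpc.Server(
    "proxy-host",    # address of the RPC proxy, not of this machine
    9090,            # port the proxy listens on
    is_proxy=True,   # register with the proxy instead of serving directly
    key="example-key",
)
server.proc.join()
```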
gh_patches_debug_31182 | rasdani/github-patches | git_diff | pypa__pip-10145 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use sysconfig.get_preferred_scheme etc.
Todo to self. This has been implemented for 3.10 and we should support it in 21.2.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/locations/_sysconfig.py`
Content:
```
1 import distutils.util # FIXME: For change_root.
2 import logging
3 import os
4 import sys
5 import sysconfig
6 import typing
7
8 from pip._internal.exceptions import InvalidSchemeCombination, UserInstallationInvalid
9 from pip._internal.models.scheme import SCHEME_KEYS, Scheme
10 from pip._internal.utils.virtualenv import running_under_virtualenv
11
12 from .base import get_major_minor_version, is_osx_framework
13
14 logger = logging.getLogger(__name__)
15
16
17 # Notes on _infer_* functions.
18 # Unfortunately ``_get_default_scheme()`` is private, so there's no way to
19 # ask things like "what is the '_prefix' scheme on this platform". These
20 # functions try to answer that with some heuristics while accounting for ad-hoc
21 # platforms not covered by CPython's default sysconfig implementation. If the
22 # ad-hoc implementation does not fully implement sysconfig, we'll fall back to
23 # a POSIX scheme.
24
25 _AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())
26
27
28 def _infer_prefix() -> str:
29 """Try to find a prefix scheme for the current platform.
30
31 This tries:
32
33 * A special ``osx_framework_library`` for Python distributed by Apple's
34 Command Line Tools, when not running in a virtual environment.
35 * Implementation + OS, used by PyPy on Windows (``pypy_nt``).
36 * Implementation without OS, used by PyPy on POSIX (``pypy``).
37 * OS + "prefix", used by CPython on POSIX (``posix_prefix``).
38 * Just the OS name, used by CPython on Windows (``nt``).
39
40 If none of the above works, fall back to ``posix_prefix``.
41 """
42 os_framework_global = is_osx_framework() and not running_under_virtualenv()
43 if os_framework_global and "osx_framework_library" in _AVAILABLE_SCHEMES:
44 return "osx_framework_library"
45 implementation_suffixed = f"{sys.implementation.name}_{os.name}"
46 if implementation_suffixed in _AVAILABLE_SCHEMES:
47 return implementation_suffixed
48 if sys.implementation.name in _AVAILABLE_SCHEMES:
49 return sys.implementation.name
50 suffixed = f"{os.name}_prefix"
51 if suffixed in _AVAILABLE_SCHEMES:
52 return suffixed
53 if os.name in _AVAILABLE_SCHEMES: # On Windows, prefx is just called "nt".
54 return os.name
55 return "posix_prefix"
56
57
58 def _infer_user() -> str:
59 """Try to find a user scheme for the current platform."""
60 if is_osx_framework() and not running_under_virtualenv():
61 suffixed = "osx_framework_user"
62 else:
63 suffixed = f"{os.name}_user"
64 if suffixed in _AVAILABLE_SCHEMES:
65 return suffixed
66 if "posix_user" not in _AVAILABLE_SCHEMES: # User scheme unavailable.
67 raise UserInstallationInvalid()
68 return "posix_user"
69
70
71 def _infer_home() -> str:
72 """Try to find a home for the current platform."""
73 suffixed = f"{os.name}_home"
74 if suffixed in _AVAILABLE_SCHEMES:
75 return suffixed
76 return "posix_home"
77
78
79 # Update these keys if the user sets a custom home.
80 _HOME_KEYS = [
81 "installed_base",
82 "base",
83 "installed_platbase",
84 "platbase",
85 "prefix",
86 "exec_prefix",
87 ]
88 if sysconfig.get_config_var("userbase") is not None:
89 _HOME_KEYS.append("userbase")
90
91
92 def get_scheme(
93 dist_name: str,
94 user: bool = False,
95 home: typing.Optional[str] = None,
96 root: typing.Optional[str] = None,
97 isolated: bool = False,
98 prefix: typing.Optional[str] = None,
99 ) -> Scheme:
100 """
101 Get the "scheme" corresponding to the input parameters.
102
103 :param dist_name: the name of the package to retrieve the scheme for, used
104 in the headers scheme path
105 :param user: indicates to use the "user" scheme
106 :param home: indicates to use the "home" scheme
107 :param root: root under which other directories are re-based
108 :param isolated: ignored, but kept for distutils compatibility (where
109 this controls whether the user-site pydistutils.cfg is honored)
110 :param prefix: indicates to use the "prefix" scheme and provides the
111 base directory for the same
112 """
113 if user and prefix:
114 raise InvalidSchemeCombination("--user", "--prefix")
115 if home and prefix:
116 raise InvalidSchemeCombination("--home", "--prefix")
117
118 if home is not None:
119 scheme_name = _infer_home()
120 elif user:
121 scheme_name = _infer_user()
122 else:
123 scheme_name = _infer_prefix()
124
125 if home is not None:
126 variables = {k: home for k in _HOME_KEYS}
127 elif prefix is not None:
128 variables = {k: prefix for k in _HOME_KEYS}
129 else:
130 variables = {}
131
132 paths = sysconfig.get_paths(scheme=scheme_name, vars=variables)
133
134 # Logic here is very arbitrary, we're doing it for compatibility, don't ask.
135 # 1. Pip historically uses a special header path in virtual environments.
136 # 2. If the distribution name is not known, distutils uses 'UNKNOWN'. We
137 # only do the same when not running in a virtual environment because
138 # pip's historical header path logic (see point 1) did not do this.
139 if running_under_virtualenv():
140 if user:
141 base = variables.get("userbase", sys.prefix)
142 else:
143 base = variables.get("base", sys.prefix)
144 python_xy = f"python{get_major_minor_version()}"
145 paths["include"] = os.path.join(base, "include", "site", python_xy)
146 elif not dist_name:
147 dist_name = "UNKNOWN"
148
149 scheme = Scheme(
150 platlib=paths["platlib"],
151 purelib=paths["purelib"],
152 headers=os.path.join(paths["include"], dist_name),
153 scripts=paths["scripts"],
154 data=paths["data"],
155 )
156 if root is not None:
157 for key in SCHEME_KEYS:
158 value = distutils.util.change_root(root, getattr(scheme, key))
159 setattr(scheme, key, value)
160 return scheme
161
162
163 def get_bin_prefix() -> str:
164 # Forcing to use /usr/local/bin for standard macOS framework installs.
165 if sys.platform[:6] == "darwin" and sys.prefix[:16] == "/System/Library/":
166 return "/usr/local/bin"
167 return sysconfig.get_paths()["scripts"]
168
169
170 def get_purelib() -> str:
171 return sysconfig.get_paths()["purelib"]
172
173
174 def get_platlib() -> str:
175 return sysconfig.get_paths()["platlib"]
176
177
178 def get_prefixed_libs(prefix: str) -> typing.Tuple[str, str]:
179 paths = sysconfig.get_paths(vars={"base": prefix, "platbase": prefix})
180 return (paths["purelib"], paths["platlib"])
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/locations/_sysconfig.py b/src/pip/_internal/locations/_sysconfig.py
--- a/src/pip/_internal/locations/_sysconfig.py
+++ b/src/pip/_internal/locations/_sysconfig.py
@@ -24,6 +24,8 @@
_AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())
+_HAS_PREFERRED_SCHEME_API = sys.version_info >= (3, 10)
+
def _infer_prefix() -> str:
"""Try to find a prefix scheme for the current platform.
@@ -39,6 +41,8 @@
If none of the above works, fall back to ``posix_prefix``.
"""
+ if _HAS_PREFERRED_SCHEME_API:
+ return sysconfig.get_preferred_scheme("prefix") # type: ignore
os_framework_global = is_osx_framework() and not running_under_virtualenv()
if os_framework_global and "osx_framework_library" in _AVAILABLE_SCHEMES:
return "osx_framework_library"
@@ -57,6 +61,8 @@
def _infer_user() -> str:
"""Try to find a user scheme for the current platform."""
+ if _HAS_PREFERRED_SCHEME_API:
+ return sysconfig.get_preferred_scheme("user") # type: ignore
if is_osx_framework() and not running_under_virtualenv():
suffixed = "osx_framework_user"
else:
@@ -70,6 +76,8 @@
def _infer_home() -> str:
"""Try to find a home for the current platform."""
+ if _HAS_PREFERRED_SCHEME_API:
+ return sysconfig.get_preferred_scheme("home") # type: ignore
suffixed = f"{os.name}_home"
if suffixed in _AVAILABLE_SCHEMES:
return suffixed
| {"golden_diff": "diff --git a/src/pip/_internal/locations/_sysconfig.py b/src/pip/_internal/locations/_sysconfig.py\n--- a/src/pip/_internal/locations/_sysconfig.py\n+++ b/src/pip/_internal/locations/_sysconfig.py\n@@ -24,6 +24,8 @@\n \n _AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())\n \n+_HAS_PREFERRED_SCHEME_API = sys.version_info >= (3, 10)\n+\n \n def _infer_prefix() -> str:\n \"\"\"Try to find a prefix scheme for the current platform.\n@@ -39,6 +41,8 @@\n \n If none of the above works, fall back to ``posix_prefix``.\n \"\"\"\n+ if _HAS_PREFERRED_SCHEME_API:\n+ return sysconfig.get_preferred_scheme(\"prefix\") # type: ignore\n os_framework_global = is_osx_framework() and not running_under_virtualenv()\n if os_framework_global and \"osx_framework_library\" in _AVAILABLE_SCHEMES:\n return \"osx_framework_library\"\n@@ -57,6 +61,8 @@\n \n def _infer_user() -> str:\n \"\"\"Try to find a user scheme for the current platform.\"\"\"\n+ if _HAS_PREFERRED_SCHEME_API:\n+ return sysconfig.get_preferred_scheme(\"user\") # type: ignore\n if is_osx_framework() and not running_under_virtualenv():\n suffixed = \"osx_framework_user\"\n else:\n@@ -70,6 +76,8 @@\n \n def _infer_home() -> str:\n \"\"\"Try to find a home for the current platform.\"\"\"\n+ if _HAS_PREFERRED_SCHEME_API:\n+ return sysconfig.get_preferred_scheme(\"home\") # type: ignore\n suffixed = f\"{os.name}_home\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n", "issue": "Use sysconfig.get_preferred_scheme etc.\nTodo to self. This has been implemented for 3.10 and we should support is in 21.2.\n", "before_files": [{"content": "import distutils.util # FIXME: For change_root.\nimport logging\nimport os\nimport sys\nimport sysconfig\nimport typing\n\nfrom pip._internal.exceptions import InvalidSchemeCombination, UserInstallationInvalid\nfrom pip._internal.models.scheme import SCHEME_KEYS, Scheme\nfrom pip._internal.utils.virtualenv import running_under_virtualenv\n\nfrom .base import get_major_minor_version, is_osx_framework\n\nlogger = logging.getLogger(__name__)\n\n\n# Notes on _infer_* functions.\n# Unfortunately ``_get_default_scheme()`` is private, so there's no way to\n# ask things like \"what is the '_prefix' scheme on this platform\". These\n# functions try to answer that with some heuristics while accounting for ad-hoc\n# platforms not covered by CPython's default sysconfig implementation. 
If the\n# ad-hoc implementation does not fully implement sysconfig, we'll fall back to\n# a POSIX scheme.\n\n_AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())\n\n\ndef _infer_prefix() -> str:\n \"\"\"Try to find a prefix scheme for the current platform.\n\n This tries:\n\n * A special ``osx_framework_library`` for Python distributed by Apple's\n Command Line Tools, when not running in a virtual environment.\n * Implementation + OS, used by PyPy on Windows (``pypy_nt``).\n * Implementation without OS, used by PyPy on POSIX (``pypy``).\n * OS + \"prefix\", used by CPython on POSIX (``posix_prefix``).\n * Just the OS name, used by CPython on Windows (``nt``).\n\n If none of the above works, fall back to ``posix_prefix``.\n \"\"\"\n os_framework_global = is_osx_framework() and not running_under_virtualenv()\n if os_framework_global and \"osx_framework_library\" in _AVAILABLE_SCHEMES:\n return \"osx_framework_library\"\n implementation_suffixed = f\"{sys.implementation.name}_{os.name}\"\n if implementation_suffixed in _AVAILABLE_SCHEMES:\n return implementation_suffixed\n if sys.implementation.name in _AVAILABLE_SCHEMES:\n return sys.implementation.name\n suffixed = f\"{os.name}_prefix\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n if os.name in _AVAILABLE_SCHEMES: # On Windows, prefx is just called \"nt\".\n return os.name\n return \"posix_prefix\"\n\n\ndef _infer_user() -> str:\n \"\"\"Try to find a user scheme for the current platform.\"\"\"\n if is_osx_framework() and not running_under_virtualenv():\n suffixed = \"osx_framework_user\"\n else:\n suffixed = f\"{os.name}_user\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n if \"posix_user\" not in _AVAILABLE_SCHEMES: # User scheme unavailable.\n raise UserInstallationInvalid()\n return \"posix_user\"\n\n\ndef _infer_home() -> str:\n \"\"\"Try to find a home for the current platform.\"\"\"\n suffixed = f\"{os.name}_home\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n return \"posix_home\"\n\n\n# Update these keys if the user sets a custom home.\n_HOME_KEYS = [\n \"installed_base\",\n \"base\",\n \"installed_platbase\",\n \"platbase\",\n \"prefix\",\n \"exec_prefix\",\n]\nif sysconfig.get_config_var(\"userbase\") is not None:\n _HOME_KEYS.append(\"userbase\")\n\n\ndef get_scheme(\n dist_name: str,\n user: bool = False,\n home: typing.Optional[str] = None,\n root: typing.Optional[str] = None,\n isolated: bool = False,\n prefix: typing.Optional[str] = None,\n) -> Scheme:\n \"\"\"\n Get the \"scheme\" corresponding to the input parameters.\n\n :param dist_name: the name of the package to retrieve the scheme for, used\n in the headers scheme path\n :param user: indicates to use the \"user\" scheme\n :param home: indicates to use the \"home\" scheme\n :param root: root under which other directories are re-based\n :param isolated: ignored, but kept for distutils compatibility (where\n this controls whether the user-site pydistutils.cfg is honored)\n :param prefix: indicates to use the \"prefix\" scheme and provides the\n base directory for the same\n \"\"\"\n if user and prefix:\n raise InvalidSchemeCombination(\"--user\", \"--prefix\")\n if home and prefix:\n raise InvalidSchemeCombination(\"--home\", \"--prefix\")\n\n if home is not None:\n scheme_name = _infer_home()\n elif user:\n scheme_name = _infer_user()\n else:\n scheme_name = _infer_prefix()\n\n if home is not None:\n variables = {k: home for k in _HOME_KEYS}\n elif prefix is not None:\n variables = {k: prefix for k in _HOME_KEYS}\n else:\n variables = 
{}\n\n paths = sysconfig.get_paths(scheme=scheme_name, vars=variables)\n\n # Logic here is very arbitrary, we're doing it for compatibility, don't ask.\n # 1. Pip historically uses a special header path in virtual environments.\n # 2. If the distribution name is not known, distutils uses 'UNKNOWN'. We\n # only do the same when not running in a virtual environment because\n # pip's historical header path logic (see point 1) did not do this.\n if running_under_virtualenv():\n if user:\n base = variables.get(\"userbase\", sys.prefix)\n else:\n base = variables.get(\"base\", sys.prefix)\n python_xy = f\"python{get_major_minor_version()}\"\n paths[\"include\"] = os.path.join(base, \"include\", \"site\", python_xy)\n elif not dist_name:\n dist_name = \"UNKNOWN\"\n\n scheme = Scheme(\n platlib=paths[\"platlib\"],\n purelib=paths[\"purelib\"],\n headers=os.path.join(paths[\"include\"], dist_name),\n scripts=paths[\"scripts\"],\n data=paths[\"data\"],\n )\n if root is not None:\n for key in SCHEME_KEYS:\n value = distutils.util.change_root(root, getattr(scheme, key))\n setattr(scheme, key, value)\n return scheme\n\n\ndef get_bin_prefix() -> str:\n # Forcing to use /usr/local/bin for standard macOS framework installs.\n if sys.platform[:6] == \"darwin\" and sys.prefix[:16] == \"/System/Library/\":\n return \"/usr/local/bin\"\n return sysconfig.get_paths()[\"scripts\"]\n\n\ndef get_purelib() -> str:\n return sysconfig.get_paths()[\"purelib\"]\n\n\ndef get_platlib() -> str:\n return sysconfig.get_paths()[\"platlib\"]\n\n\ndef get_prefixed_libs(prefix: str) -> typing.Tuple[str, str]:\n paths = sysconfig.get_paths(vars={\"base\": prefix, \"platbase\": prefix})\n return (paths[\"purelib\"], paths[\"platlib\"])\n", "path": "src/pip/_internal/locations/_sysconfig.py"}], "after_files": [{"content": "import distutils.util # FIXME: For change_root.\nimport logging\nimport os\nimport sys\nimport sysconfig\nimport typing\n\nfrom pip._internal.exceptions import InvalidSchemeCombination, UserInstallationInvalid\nfrom pip._internal.models.scheme import SCHEME_KEYS, Scheme\nfrom pip._internal.utils.virtualenv import running_under_virtualenv\n\nfrom .base import get_major_minor_version, is_osx_framework\n\nlogger = logging.getLogger(__name__)\n\n\n# Notes on _infer_* functions.\n# Unfortunately ``_get_default_scheme()`` is private, so there's no way to\n# ask things like \"what is the '_prefix' scheme on this platform\". These\n# functions try to answer that with some heuristics while accounting for ad-hoc\n# platforms not covered by CPython's default sysconfig implementation. 
If the\n# ad-hoc implementation does not fully implement sysconfig, we'll fall back to\n# a POSIX scheme.\n\n_AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())\n\n_HAS_PREFERRED_SCHEME_API = sys.version_info >= (3, 10)\n\n\ndef _infer_prefix() -> str:\n \"\"\"Try to find a prefix scheme for the current platform.\n\n This tries:\n\n * A special ``osx_framework_library`` for Python distributed by Apple's\n Command Line Tools, when not running in a virtual environment.\n * Implementation + OS, used by PyPy on Windows (``pypy_nt``).\n * Implementation without OS, used by PyPy on POSIX (``pypy``).\n * OS + \"prefix\", used by CPython on POSIX (``posix_prefix``).\n * Just the OS name, used by CPython on Windows (``nt``).\n\n If none of the above works, fall back to ``posix_prefix``.\n \"\"\"\n if _HAS_PREFERRED_SCHEME_API:\n return sysconfig.get_preferred_scheme(\"prefix\") # type: ignore\n os_framework_global = is_osx_framework() and not running_under_virtualenv()\n if os_framework_global and \"osx_framework_library\" in _AVAILABLE_SCHEMES:\n return \"osx_framework_library\"\n implementation_suffixed = f\"{sys.implementation.name}_{os.name}\"\n if implementation_suffixed in _AVAILABLE_SCHEMES:\n return implementation_suffixed\n if sys.implementation.name in _AVAILABLE_SCHEMES:\n return sys.implementation.name\n suffixed = f\"{os.name}_prefix\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n if os.name in _AVAILABLE_SCHEMES: # On Windows, prefx is just called \"nt\".\n return os.name\n return \"posix_prefix\"\n\n\ndef _infer_user() -> str:\n \"\"\"Try to find a user scheme for the current platform.\"\"\"\n if _HAS_PREFERRED_SCHEME_API:\n return sysconfig.get_preferred_scheme(\"user\") # type: ignore\n if is_osx_framework() and not running_under_virtualenv():\n suffixed = \"osx_framework_user\"\n else:\n suffixed = f\"{os.name}_user\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n if \"posix_user\" not in _AVAILABLE_SCHEMES: # User scheme unavailable.\n raise UserInstallationInvalid()\n return \"posix_user\"\n\n\ndef _infer_home() -> str:\n \"\"\"Try to find a home for the current platform.\"\"\"\n if _HAS_PREFERRED_SCHEME_API:\n return sysconfig.get_preferred_scheme(\"home\") # type: ignore\n suffixed = f\"{os.name}_home\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n return \"posix_home\"\n\n\n# Update these keys if the user sets a custom home.\n_HOME_KEYS = [\n \"installed_base\",\n \"base\",\n \"installed_platbase\",\n \"platbase\",\n \"prefix\",\n \"exec_prefix\",\n]\nif sysconfig.get_config_var(\"userbase\") is not None:\n _HOME_KEYS.append(\"userbase\")\n\n\ndef get_scheme(\n dist_name: str,\n user: bool = False,\n home: typing.Optional[str] = None,\n root: typing.Optional[str] = None,\n isolated: bool = False,\n prefix: typing.Optional[str] = None,\n) -> Scheme:\n \"\"\"\n Get the \"scheme\" corresponding to the input parameters.\n\n :param dist_name: the name of the package to retrieve the scheme for, used\n in the headers scheme path\n :param user: indicates to use the \"user\" scheme\n :param home: indicates to use the \"home\" scheme\n :param root: root under which other directories are re-based\n :param isolated: ignored, but kept for distutils compatibility (where\n this controls whether the user-site pydistutils.cfg is honored)\n :param prefix: indicates to use the \"prefix\" scheme and provides the\n base directory for the same\n \"\"\"\n if user and prefix:\n raise InvalidSchemeCombination(\"--user\", \"--prefix\")\n if home and prefix:\n raise 
InvalidSchemeCombination(\"--home\", \"--prefix\")\n\n if home is not None:\n scheme_name = _infer_home()\n elif user:\n scheme_name = _infer_user()\n else:\n scheme_name = _infer_prefix()\n\n if home is not None:\n variables = {k: home for k in _HOME_KEYS}\n elif prefix is not None:\n variables = {k: prefix for k in _HOME_KEYS}\n else:\n variables = {}\n\n paths = sysconfig.get_paths(scheme=scheme_name, vars=variables)\n\n # Logic here is very arbitrary, we're doing it for compatibility, don't ask.\n # 1. Pip historically uses a special header path in virtual environments.\n # 2. If the distribution name is not known, distutils uses 'UNKNOWN'. We\n # only do the same when not running in a virtual environment because\n # pip's historical header path logic (see point 1) did not do this.\n if running_under_virtualenv():\n if user:\n base = variables.get(\"userbase\", sys.prefix)\n else:\n base = variables.get(\"base\", sys.prefix)\n python_xy = f\"python{get_major_minor_version()}\"\n paths[\"include\"] = os.path.join(base, \"include\", \"site\", python_xy)\n elif not dist_name:\n dist_name = \"UNKNOWN\"\n\n scheme = Scheme(\n platlib=paths[\"platlib\"],\n purelib=paths[\"purelib\"],\n headers=os.path.join(paths[\"include\"], dist_name),\n scripts=paths[\"scripts\"],\n data=paths[\"data\"],\n )\n if root is not None:\n for key in SCHEME_KEYS:\n value = distutils.util.change_root(root, getattr(scheme, key))\n setattr(scheme, key, value)\n return scheme\n\n\ndef get_bin_prefix() -> str:\n # Forcing to use /usr/local/bin for standard macOS framework installs.\n if sys.platform[:6] == \"darwin\" and sys.prefix[:16] == \"/System/Library/\":\n return \"/usr/local/bin\"\n return sysconfig.get_paths()[\"scripts\"]\n\n\ndef get_purelib() -> str:\n return sysconfig.get_paths()[\"purelib\"]\n\n\ndef get_platlib() -> str:\n return sysconfig.get_paths()[\"platlib\"]\n\n\ndef get_prefixed_libs(prefix: str) -> typing.Tuple[str, str]:\n paths = sysconfig.get_paths(vars={\"base\": prefix, \"platbase\": prefix})\n return (paths[\"purelib\"], paths[\"platlib\"])\n", "path": "src/pip/_internal/locations/_sysconfig.py"}]} | 2,272 | 408 |
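For the record above, the shape of the fix is easy to show in isolation: gate on the interpreter version and prefer the new standard-library API when it exists. A minimal sketch with a deliberately crude fallback (pip's real `_infer_prefix()` walks `sysconfig.get_scheme_names()` with several heuristics instead):

```python
import sys
import sysconfig

# sysconfig.get_preferred_scheme() only exists on Python 3.10+.
_HAS_PREFERRED_SCHEME_API = sys.version_info >= (3, 10)


def preferred_prefix_scheme() -> str:
    """Name of the install scheme for a normal prefix install."""
    if _HAS_PREFERRED_SCHEME_API:
        return sysconfig.get_preferred_scheme("prefix")  # also accepts "home" and "user"
    # Simplified fallback for older interpreters.
    return "nt" if sys.platform == "win32" else "posix_prefix"
```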
gh_patches_debug_45488 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3519 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create single build updates in pending status for rawhide
Now that we have the automation in place to automatically move an update from pending to testing, we should use that for single-build updates too.
For that we need to change the automatic_update consumer (https://github.com/fedora-infra/bodhi/blob/develop/bodhi/server/consumers/automatic_updates.py) to create the update in the pending status.
And make sure that the signed consumer will move the updates from pending to testing when the build is signed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/consumers/automatic_updates.py`
Content:
```
1 # Copyright © 2019 Red Hat, Inc. and others.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The Bodhi handler that creates updates automatically from tagged builds.
20
21 This module is responsible for the process of creating updates when builds are
22 tagged with certain tags.
23 """
24
25 import logging
26
27 import fedora_messaging
28
29 from bodhi.server import buildsys
30 from bodhi.server.config import config
31 from bodhi.server.models import Build, ContentType, Package, Release, TestGatingStatus
32 from bodhi.server.models import Update, UpdateStatus, UpdateType, User
33 from bodhi.server.util import transactional_session_maker
34
35 log = logging.getLogger('bodhi')
36
37
38 class AutomaticUpdateHandler:
39 """
40 The Bodhi Automatic Update Handler.
41
42 A consumer that listens for messages about tagged builds and creates
43 updates from them.
44 """
45
46 def __init__(self, db_factory: transactional_session_maker = None):
47 """
48 Initialize the Automatic Update Handler.
49
50 Args:
51 db_factory: If given, used as the db_factory for this handler. If
52 None (the default), a new TransactionalSessionMaker is created and
53 used.
54 """
55 if not db_factory:
56 self.db_factory = transactional_session_maker()
57 else:
58 self.db_factory = db_factory
59
60 def __call__(self, message: fedora_messaging.api.Message) -> None:
61 """Create updates from appropriately tagged builds.
62
63 Args:
64 message: The message we are processing.
65 """
66 body = message.body
67
68 missing = []
69 for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):
70 if mandatory not in body:
71 missing.append(mandatory)
72 if missing:
73 log.debug(f"Received incomplete tag message. Missing: {', '.join(missing)}")
74 return
75
76 btag = body['tag']
77 bnvr = '{name}-{version}-{release}'.format(**body)
78
79 koji = buildsys.get_session()
80
81 kbuildinfo = koji.getBuild(bnvr)
82 if not kbuildinfo:
83 log.debug(f"Can't find Koji build for {bnvr}.")
84 return
85
86 if 'nvr' not in kbuildinfo:
87 log.debug(f"Koji build info for {bnvr} doesn't contain 'nvr'.")
88 return
89
90 if 'owner_name' not in kbuildinfo:
91 log.debug(f"Koji build info for {bnvr} doesn't contain 'owner_name'.")
92 return
93
94 if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):
95 log.debug(f"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in "
96 "automatic_updates_blacklist, skipping.")
97 return
98
99 # some APIs want the Koji build info, some others want the same
100 # wrapped in a larger (request?) structure
101 rbuildinfo = {
102 'info': kbuildinfo,
103 'nvr': kbuildinfo['nvr'].rsplit('-', 2),
104 }
105
106 with self.db_factory() as dbsession:
107 rel = dbsession.query(Release).filter_by(create_automatic_updates=True,
108 pending_testing_tag=btag).first()
109 if not rel:
110 log.debug(f"Ignoring build being tagged into {btag!r}, no release configured for "
111 "automatic updates for it found.")
112 return
113
114 bcls = ContentType.infer_content_class(Build, kbuildinfo)
115 build = bcls.get(bnvr)
116 if build and build.update:
117 if build.update.status == UpdateStatus.pending:
118 log.info(
119 f"Build, active update for {bnvr} exists already "
120 "in Pending, moving it along.")
121 build.update.status = UpdateStatus.testing
122 build.update.request = None
123 dbsession.add(build)
124 if config.get('test_gating.required'):
125 log.debug(
126 'Test gating is required, marking the update as waiting on test '
127 'gating and updating it from Greenwave to get the real status.')
128 build.update.test_gating_status = TestGatingStatus.waiting
129 build.update.update_test_gating_status()
130 dbsession.commit()
131 else:
132 log.info(f"Build, active update for {bnvr} exists already, skipping.")
133 return
134
135 if not build:
136 log.debug(f"Build for {bnvr} doesn't exist yet, creating.")
137
138 # Package.get_or_create() infers content type already
139 log.debug("Getting/creating related package object.")
140 pkg = Package.get_or_create(rbuildinfo)
141
142 log.debug("Creating build object, adding it to the DB.")
143 build = bcls(nvr=bnvr, package=pkg)
144 dbsession.add(build)
145
146 owner_name = kbuildinfo['owner_name']
147 user = User.get(owner_name)
148 if not user:
149 log.debug(f"Creating bodhi user for '{owner_name}'.")
150 # Leave email, groups blank, these will be filled
151 # in or updated when they log into Bodhi next time, see
152 # bodhi.server.security:remember_me().
153 user = User(name=owner_name)
154 dbsession.add(user)
155
156 log.debug(f"Creating new update for {bnvr}.")
157 update = Update(
158 release=rel,
159 builds=[build],
160 notes=f"Automatic update for {bnvr}.",
161 type=UpdateType.unspecified,
162 stable_karma=3,
163 unstable_karma=-3,
164 autokarma=False,
165 user=user,
166 status=UpdateStatus.testing,
167 )
168
169 # Comment on the update that it was automatically created.
170 update.comment(
171 dbsession,
172 str("This update was automatically created"),
173 author="bodhi",
174 )
175
176 if config.get('test_gating.required'):
177 log.debug(
178 'Test gating required is enforced, marking the update as '
179 'waiting on test gating and updating it from Greenwave to '
180 'get the real status.')
181 update.test_gating_status = TestGatingStatus.waiting
182 update.update_test_gating_status()
183
184 log.debug("Adding new update to the database.")
185 dbsession.add(update)
186
187 log.debug("Committing changes to the database.")
188 dbsession.commit()
189
```
Path: `bodhi/server/consumers/signed.py`
Content:
```
1 # Copyright © 2016-2019 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The "signed handler".
20
21 This module is responsible for marking builds as "signed" when they get moved
22 from the pending-signing to pending-updates-testing tag by RoboSignatory.
23 """
24
25 import logging
26
27 import fedora_messaging
28 from sqlalchemy import func
29
30 from bodhi.server.models import Build, UpdateStatus
31 from bodhi.server.util import transactional_session_maker
32
33 log = logging.getLogger('bodhi')
34
35
36 class SignedHandler(object):
37 """
38 The Bodhi Signed Handler.
39
40 A fedora-messaging listener waiting for messages from koji about builds being tagged.
41 """
42
43 def __init__(self):
44 """Initialize the SignedHandler."""
45 self.db_factory = transactional_session_maker()
46
47 def __call__(self, message: fedora_messaging.api.Message):
48 """
49 Handle messages arriving with the configured topic.
50
51 This marks a build as signed if it is assigned to the pending testing release tag.
52
53 Example message format::
54 {
55 'body': {
56 'build_id': 442562,
57 'name': 'colord',
58 'tag_id': 214,
59 'instance': 's390',
60 'tag': 'f26-updates-testing-pending',
61 'user': 'sharkcz',
62 'version': '1.3.4',
63 'owner': 'sharkcz',
64 'release': '1.fc26'
65 },
66 }
67
68 The message can contain additional keys.
69
70 Duplicate messages: this method is idempotent.
71
72 Args:
73 message: The incoming message in the format described above.
74 """
75 message = message.body
76 build_nvr = '%(name)s-%(version)s-%(release)s' % message
77 tag = message['tag']
78
79 log.info("%s tagged into %s" % (build_nvr, tag))
80
81 with self.db_factory():
82 build = Build.get(build_nvr)
83 if not build:
84 log.info("Build was not submitted, skipping")
85 return
86
87 if not build.release:
88 log.info('Build is not assigned to release, skipping')
89 return
90
91 if build.update.from_tag:
92 koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)
93 if tag != koji_testing_tag:
94 log.info("Tag is not testing side tag, skipping")
95 return
96 else:
97 if build.release.pending_testing_tag != tag:
98 log.info("Tag is not pending_testing tag, skipping")
99 return
100
101 if build.signed:
102 log.info("Build was already marked as signed (maybe a duplicate message)")
103 return
104
105 # This build was moved into the pending_testing tag for the applicable release, which
106 # is done by RoboSignatory to indicate that the build has been correctly signed and
107 # written out. Mark it as such.
108 log.info("Build has been signed, marking")
109 build.signed = True
110 log.info("Build %s has been marked as signed" % build_nvr)
111
112 # If every build in update is signed change status to testing
113 if build.update.from_tag and build.update.signed():
114 log.info("Every build in update is signed, set status to testing")
115
116 build.update.status = UpdateStatus.testing
117 build.update.date_testing = func.current_timestamp()
118 build.update.request = None
119 log.info(f"Update {build.update.display_name} status has been set to testing")
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/consumers/automatic_updates.py b/bodhi/server/consumers/automatic_updates.py
--- a/bodhi/server/consumers/automatic_updates.py
+++ b/bodhi/server/consumers/automatic_updates.py
@@ -28,7 +28,7 @@
from bodhi.server import buildsys
from bodhi.server.config import config
-from bodhi.server.models import Build, ContentType, Package, Release, TestGatingStatus
+from bodhi.server.models import Build, ContentType, Package, Release
from bodhi.server.models import Update, UpdateStatus, UpdateType, User
from bodhi.server.util import transactional_session_maker
@@ -105,7 +105,7 @@
with self.db_factory() as dbsession:
rel = dbsession.query(Release).filter_by(create_automatic_updates=True,
- pending_testing_tag=btag).first()
+ candidate_tag=btag).first()
if not rel:
log.debug(f"Ignoring build being tagged into {btag!r}, no release configured for "
"automatic updates for it found.")
@@ -114,22 +114,7 @@
bcls = ContentType.infer_content_class(Build, kbuildinfo)
build = bcls.get(bnvr)
if build and build.update:
- if build.update.status == UpdateStatus.pending:
- log.info(
- f"Build, active update for {bnvr} exists already "
- "in Pending, moving it along.")
- build.update.status = UpdateStatus.testing
- build.update.request = None
- dbsession.add(build)
- if config.get('test_gating.required'):
- log.debug(
- 'Test gating is required, marking the update as waiting on test '
- 'gating and updating it from Greenwave to get the real status.')
- build.update.test_gating_status = TestGatingStatus.waiting
- build.update.update_test_gating_status()
- dbsession.commit()
- else:
- log.info(f"Build, active update for {bnvr} exists already, skipping.")
+ log.info(f"Build, active update for {bnvr} exists already, skipping.")
return
if not build:
@@ -163,7 +148,7 @@
unstable_karma=-3,
autokarma=False,
user=user,
- status=UpdateStatus.testing,
+ status=UpdateStatus.pending,
)
# Comment on the update that it was automatically created.
@@ -173,14 +158,6 @@
author="bodhi",
)
- if config.get('test_gating.required'):
- log.debug(
- 'Test gating required is enforced, marking the update as '
- 'waiting on test gating and updating it from Greenwave to '
- 'get the real status.')
- update.test_gating_status = TestGatingStatus.waiting
- update.update_test_gating_status()
-
log.debug("Adding new update to the database.")
dbsession.add(update)
diff --git a/bodhi/server/consumers/signed.py b/bodhi/server/consumers/signed.py
--- a/bodhi/server/consumers/signed.py
+++ b/bodhi/server/consumers/signed.py
@@ -27,7 +27,8 @@
import fedora_messaging
from sqlalchemy import func
-from bodhi.server.models import Build, UpdateStatus
+from bodhi.server.config import config
+from bodhi.server.models import Build, UpdateStatus, TestGatingStatus
from bodhi.server.util import transactional_session_maker
log = logging.getLogger('bodhi')
@@ -110,10 +111,17 @@
log.info("Build %s has been marked as signed" % build_nvr)
# If every build in update is signed change status to testing
- if build.update.from_tag and build.update.signed():
+ if not build.update.release.composed_by_bodhi and build.update.signed():
log.info("Every build in update is signed, set status to testing")
build.update.status = UpdateStatus.testing
build.update.date_testing = func.current_timestamp()
build.update.request = None
+
+ if config.get("test_gating.required"):
+ log.debug('Test gating is required, marking the update as waiting on test '
+ 'gating and updating it from Greenwave to get the real status.')
+ build.update.test_gating_status = TestGatingStatus.waiting
+ build.update.update_test_gating_status()
+
log.info(f"Update {build.update.display_name} status has been set to testing")
| {"golden_diff": "diff --git a/bodhi/server/consumers/automatic_updates.py b/bodhi/server/consumers/automatic_updates.py\n--- a/bodhi/server/consumers/automatic_updates.py\n+++ b/bodhi/server/consumers/automatic_updates.py\n@@ -28,7 +28,7 @@\n \n from bodhi.server import buildsys\n from bodhi.server.config import config\n-from bodhi.server.models import Build, ContentType, Package, Release, TestGatingStatus\n+from bodhi.server.models import Build, ContentType, Package, Release\n from bodhi.server.models import Update, UpdateStatus, UpdateType, User\n from bodhi.server.util import transactional_session_maker\n \n@@ -105,7 +105,7 @@\n \n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n- pending_testing_tag=btag).first()\n+ candidate_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n@@ -114,22 +114,7 @@\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n- if build.update.status == UpdateStatus.pending:\n- log.info(\n- f\"Build, active update for {bnvr} exists already \"\n- \"in Pending, moving it along.\")\n- build.update.status = UpdateStatus.testing\n- build.update.request = None\n- dbsession.add(build)\n- if config.get('test_gating.required'):\n- log.debug(\n- 'Test gating is required, marking the update as waiting on test '\n- 'gating and updating it from Greenwave to get the real status.')\n- build.update.test_gating_status = TestGatingStatus.waiting\n- build.update.update_test_gating_status()\n- dbsession.commit()\n- else:\n- log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n+ log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n \n if not build:\n@@ -163,7 +148,7 @@\n unstable_karma=-3,\n autokarma=False,\n user=user,\n- status=UpdateStatus.testing,\n+ status=UpdateStatus.pending,\n )\n \n # Comment on the update that it was automatically created.\n@@ -173,14 +158,6 @@\n author=\"bodhi\",\n )\n \n- if config.get('test_gating.required'):\n- log.debug(\n- 'Test gating required is enforced, marking the update as '\n- 'waiting on test gating and updating it from Greenwave to '\n- 'get the real status.')\n- update.test_gating_status = TestGatingStatus.waiting\n- update.update_test_gating_status()\n-\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n \ndiff --git a/bodhi/server/consumers/signed.py b/bodhi/server/consumers/signed.py\n--- a/bodhi/server/consumers/signed.py\n+++ b/bodhi/server/consumers/signed.py\n@@ -27,7 +27,8 @@\n import fedora_messaging\n from sqlalchemy import func\n \n-from bodhi.server.models import Build, UpdateStatus\n+from bodhi.server.config import config\n+from bodhi.server.models import Build, UpdateStatus, TestGatingStatus\n from bodhi.server.util import transactional_session_maker\n \n log = logging.getLogger('bodhi')\n@@ -110,10 +111,17 @@\n log.info(\"Build %s has been marked as signed\" % build_nvr)\n \n # If every build in update is signed change status to testing\n- if build.update.from_tag and build.update.signed():\n+ if not build.update.release.composed_by_bodhi and build.update.signed():\n log.info(\"Every build in update is signed, set status to testing\")\n \n build.update.status = UpdateStatus.testing\n build.update.date_testing = func.current_timestamp()\n build.update.request = None\n+\n+ if config.get(\"test_gating.required\"):\n+ 
log.debug('Test gating is required, marking the update as waiting on test '\n+ 'gating and updating it from Greenwave to get the real status.')\n+ build.update.test_gating_status = TestGatingStatus.waiting\n+ build.update.update_test_gating_status()\n+\n log.info(f\"Update {build.update.display_name} status has been set to testing\")\n", "issue": "Create single build updates in pending status for rawhide\nNow that we have the automation in place to automatically move an update from pending to testing we should use that for single build updates too.\r\n\r\nFor that we need to change the automatic_update consumer (https://github.com/fedora-infra/bodhi/blob/develop/bodhi/server/consumers/automatic_updates.py) to create the update in the pending status. \r\n\r\nAnd make sure that the signed consumer will move the updates from pending to testing when the build is signed.\n", "before_files": [{"content": "# Copyright \u00a9 2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe Bodhi handler that creates updates automatically from tagged builds.\n\nThis module is responsible for the process of creating updates when builds are\ntagged with certain tags.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\n\nfrom bodhi.server import buildsys\nfrom bodhi.server.config import config\nfrom bodhi.server.models import Build, ContentType, Package, Release, TestGatingStatus\nfrom bodhi.server.models import Update, UpdateStatus, UpdateType, User\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass AutomaticUpdateHandler:\n \"\"\"\n The Bodhi Automatic Update Handler.\n\n A consumer that listens for messages about tagged builds and creates\n updates from them.\n \"\"\"\n\n def __init__(self, db_factory: transactional_session_maker = None):\n \"\"\"\n Initialize the Automatic Update Handler.\n\n Args:\n db_factory: If given, used as the db_factory for this handler. If\n None (the default), a new TransactionalSessionMaker is created and\n used.\n \"\"\"\n if not db_factory:\n self.db_factory = transactional_session_maker()\n else:\n self.db_factory = db_factory\n\n def __call__(self, message: fedora_messaging.api.Message) -> None:\n \"\"\"Create updates from appropriately tagged builds.\n\n Args:\n message: The message we are processing.\n \"\"\"\n body = message.body\n\n missing = []\n for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):\n if mandatory not in body:\n missing.append(mandatory)\n if missing:\n log.debug(f\"Received incomplete tag message. 
Missing: {', '.join(missing)}\")\n return\n\n btag = body['tag']\n bnvr = '{name}-{version}-{release}'.format(**body)\n\n koji = buildsys.get_session()\n\n kbuildinfo = koji.getBuild(bnvr)\n if not kbuildinfo:\n log.debug(f\"Can't find Koji build for {bnvr}.\")\n return\n\n if 'nvr' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'nvr'.\")\n return\n\n if 'owner_name' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'owner_name'.\")\n return\n\n if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):\n log.debug(f\"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in \"\n \"automatic_updates_blacklist, skipping.\")\n return\n\n # some APIs want the Koji build info, some others want the same\n # wrapped in a larger (request?) structure\n rbuildinfo = {\n 'info': kbuildinfo,\n 'nvr': kbuildinfo['nvr'].rsplit('-', 2),\n }\n\n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n pending_testing_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n return\n\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n if build.update.status == UpdateStatus.pending:\n log.info(\n f\"Build, active update for {bnvr} exists already \"\n \"in Pending, moving it along.\")\n build.update.status = UpdateStatus.testing\n build.update.request = None\n dbsession.add(build)\n if config.get('test_gating.required'):\n log.debug(\n 'Test gating is required, marking the update as waiting on test '\n 'gating and updating it from Greenwave to get the real status.')\n build.update.test_gating_status = TestGatingStatus.waiting\n build.update.update_test_gating_status()\n dbsession.commit()\n else:\n log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n\n if not build:\n log.debug(f\"Build for {bnvr} doesn't exist yet, creating.\")\n\n # Package.get_or_create() infers content type already\n log.debug(\"Getting/creating related package object.\")\n pkg = Package.get_or_create(rbuildinfo)\n\n log.debug(\"Creating build object, adding it to the DB.\")\n build = bcls(nvr=bnvr, package=pkg)\n dbsession.add(build)\n\n owner_name = kbuildinfo['owner_name']\n user = User.get(owner_name)\n if not user:\n log.debug(f\"Creating bodhi user for '{owner_name}'.\")\n # Leave email, groups blank, these will be filled\n # in or updated when they log into Bodhi next time, see\n # bodhi.server.security:remember_me().\n user = User(name=owner_name)\n dbsession.add(user)\n\n log.debug(f\"Creating new update for {bnvr}.\")\n update = Update(\n release=rel,\n builds=[build],\n notes=f\"Automatic update for {bnvr}.\",\n type=UpdateType.unspecified,\n stable_karma=3,\n unstable_karma=-3,\n autokarma=False,\n user=user,\n status=UpdateStatus.testing,\n )\n\n # Comment on the update that it was automatically created.\n update.comment(\n dbsession,\n str(\"This update was automatically created\"),\n author=\"bodhi\",\n )\n\n if config.get('test_gating.required'):\n log.debug(\n 'Test gating required is enforced, marking the update as '\n 'waiting on test gating and updating it from Greenwave to '\n 'get the real status.')\n update.test_gating_status = TestGatingStatus.waiting\n update.update_test_gating_status()\n\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n\n log.debug(\"Committing 
changes to the database.\")\n dbsession.commit()\n", "path": "bodhi/server/consumers/automatic_updates.py"}, {"content": "# Copyright \u00a9 2016-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"signed handler\".\n\nThis module is responsible for marking builds as \"signed\" when they get moved\nfrom the pending-signing to pending-updates-testing tag by RoboSignatory.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\nfrom sqlalchemy import func\n\nfrom bodhi.server.models import Build, UpdateStatus\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass SignedHandler(object):\n \"\"\"\n The Bodhi Signed Handler.\n\n A fedora-messaging listener waiting for messages from koji about builds being tagged.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the SignedHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"\n Handle messages arriving with the configured topic.\n\n This marks a build as signed if it is assigned to the pending testing release tag.\n\n Example message format::\n {\n 'body': {\n 'build_id': 442562,\n 'name': 'colord',\n 'tag_id': 214,\n 'instance': 's390',\n 'tag': 'f26-updates-testing-pending',\n 'user': 'sharkcz',\n 'version': '1.3.4',\n 'owner': 'sharkcz',\n 'release': '1.fc26'\n },\n }\n\n The message can contain additional keys.\n\n Duplicate messages: this method is idempotent.\n\n Args:\n message: The incoming message in the format described above.\n \"\"\"\n message = message.body\n build_nvr = '%(name)s-%(version)s-%(release)s' % message\n tag = message['tag']\n\n log.info(\"%s tagged into %s\" % (build_nvr, tag))\n\n with self.db_factory():\n build = Build.get(build_nvr)\n if not build:\n log.info(\"Build was not submitted, skipping\")\n return\n\n if not build.release:\n log.info('Build is not assigned to release, skipping')\n return\n\n if build.update.from_tag:\n koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)\n if tag != koji_testing_tag:\n log.info(\"Tag is not testing side tag, skipping\")\n return\n else:\n if build.release.pending_testing_tag != tag:\n log.info(\"Tag is not pending_testing tag, skipping\")\n return\n\n if build.signed:\n log.info(\"Build was already marked as signed (maybe a duplicate message)\")\n return\n\n # This build was moved into the pending_testing tag for the applicable release, which\n # is done by RoboSignatory to indicate that the build has been correctly signed and\n # written out. 
Mark it as such.\n log.info(\"Build has been signed, marking\")\n build.signed = True\n log.info(\"Build %s has been marked as signed\" % build_nvr)\n\n # If every build in update is signed change status to testing\n if build.update.from_tag and build.update.signed():\n log.info(\"Every build in update is signed, set status to testing\")\n\n build.update.status = UpdateStatus.testing\n build.update.date_testing = func.current_timestamp()\n build.update.request = None\n log.info(f\"Update {build.update.display_name} status has been set to testing\")\n", "path": "bodhi/server/consumers/signed.py"}], "after_files": [{"content": "# Copyright \u00a9 2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe Bodhi handler that creates updates automatically from tagged builds.\n\nThis module is responsible for the process of creating updates when builds are\ntagged with certain tags.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\n\nfrom bodhi.server import buildsys\nfrom bodhi.server.config import config\nfrom bodhi.server.models import Build, ContentType, Package, Release\nfrom bodhi.server.models import Update, UpdateStatus, UpdateType, User\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass AutomaticUpdateHandler:\n \"\"\"\n The Bodhi Automatic Update Handler.\n\n A consumer that listens for messages about tagged builds and creates\n updates from them.\n \"\"\"\n\n def __init__(self, db_factory: transactional_session_maker = None):\n \"\"\"\n Initialize the Automatic Update Handler.\n\n Args:\n db_factory: If given, used as the db_factory for this handler. If\n None (the default), a new TransactionalSessionMaker is created and\n used.\n \"\"\"\n if not db_factory:\n self.db_factory = transactional_session_maker()\n else:\n self.db_factory = db_factory\n\n def __call__(self, message: fedora_messaging.api.Message) -> None:\n \"\"\"Create updates from appropriately tagged builds.\n\n Args:\n message: The message we are processing.\n \"\"\"\n body = message.body\n\n missing = []\n for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):\n if mandatory not in body:\n missing.append(mandatory)\n if missing:\n log.debug(f\"Received incomplete tag message. 
Missing: {', '.join(missing)}\")\n return\n\n btag = body['tag']\n bnvr = '{name}-{version}-{release}'.format(**body)\n\n koji = buildsys.get_session()\n\n kbuildinfo = koji.getBuild(bnvr)\n if not kbuildinfo:\n log.debug(f\"Can't find Koji build for {bnvr}.\")\n return\n\n if 'nvr' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'nvr'.\")\n return\n\n if 'owner_name' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'owner_name'.\")\n return\n\n if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):\n log.debug(f\"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in \"\n \"automatic_updates_blacklist, skipping.\")\n return\n\n # some APIs want the Koji build info, some others want the same\n # wrapped in a larger (request?) structure\n rbuildinfo = {\n 'info': kbuildinfo,\n 'nvr': kbuildinfo['nvr'].rsplit('-', 2),\n }\n\n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n candidate_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n return\n\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n\n if not build:\n log.debug(f\"Build for {bnvr} doesn't exist yet, creating.\")\n\n # Package.get_or_create() infers content type already\n log.debug(\"Getting/creating related package object.\")\n pkg = Package.get_or_create(rbuildinfo)\n\n log.debug(\"Creating build object, adding it to the DB.\")\n build = bcls(nvr=bnvr, package=pkg)\n dbsession.add(build)\n\n owner_name = kbuildinfo['owner_name']\n user = User.get(owner_name)\n if not user:\n log.debug(f\"Creating bodhi user for '{owner_name}'.\")\n # Leave email, groups blank, these will be filled\n # in or updated when they log into Bodhi next time, see\n # bodhi.server.security:remember_me().\n user = User(name=owner_name)\n dbsession.add(user)\n\n log.debug(f\"Creating new update for {bnvr}.\")\n update = Update(\n release=rel,\n builds=[build],\n notes=f\"Automatic update for {bnvr}.\",\n type=UpdateType.unspecified,\n stable_karma=3,\n unstable_karma=-3,\n autokarma=False,\n user=user,\n status=UpdateStatus.pending,\n )\n\n # Comment on the update that it was automatically created.\n update.comment(\n dbsession,\n str(\"This update was automatically created\"),\n author=\"bodhi\",\n )\n\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n\n log.debug(\"Committing changes to the database.\")\n dbsession.commit()\n", "path": "bodhi/server/consumers/automatic_updates.py"}, {"content": "# Copyright \u00a9 2016-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"signed handler\".\n\nThis module is responsible for marking builds as \"signed\" when they get moved\nfrom the pending-signing to pending-updates-testing tag by RoboSignatory.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\nfrom sqlalchemy import func\n\nfrom bodhi.server.config import config\nfrom bodhi.server.models import Build, UpdateStatus, TestGatingStatus\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass SignedHandler(object):\n \"\"\"\n The Bodhi Signed Handler.\n\n A fedora-messaging listener waiting for messages from koji about builds being tagged.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the SignedHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"\n Handle messages arriving with the configured topic.\n\n This marks a build as signed if it is assigned to the pending testing release tag.\n\n Example message format::\n {\n 'body': {\n 'build_id': 442562,\n 'name': 'colord',\n 'tag_id': 214,\n 'instance': 's390',\n 'tag': 'f26-updates-testing-pending',\n 'user': 'sharkcz',\n 'version': '1.3.4',\n 'owner': 'sharkcz',\n 'release': '1.fc26'\n },\n }\n\n The message can contain additional keys.\n\n Duplicate messages: this method is idempotent.\n\n Args:\n message: The incoming message in the format described above.\n \"\"\"\n message = message.body\n build_nvr = '%(name)s-%(version)s-%(release)s' % message\n tag = message['tag']\n\n log.info(\"%s tagged into %s\" % (build_nvr, tag))\n\n with self.db_factory():\n build = Build.get(build_nvr)\n if not build:\n log.info(\"Build was not submitted, skipping\")\n return\n\n if not build.release:\n log.info('Build is not assigned to release, skipping')\n return\n\n if build.update.from_tag:\n koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)\n if tag != koji_testing_tag:\n log.info(\"Tag is not testing side tag, skipping\")\n return\n else:\n if build.release.pending_testing_tag != tag:\n log.info(\"Tag is not pending_testing tag, skipping\")\n return\n\n if build.signed:\n log.info(\"Build was already marked as signed (maybe a duplicate message)\")\n return\n\n # This build was moved into the pending_testing tag for the applicable release, which\n # is done by RoboSignatory to indicate that the build has been correctly signed and\n # written out. 
Mark it as such.\n log.info(\"Build has been signed, marking\")\n build.signed = True\n log.info(\"Build %s has been marked as signed\" % build_nvr)\n\n # If every build in update is signed change status to testing\n if not build.update.release.composed_by_bodhi and build.update.signed():\n log.info(\"Every build in update is signed, set status to testing\")\n\n build.update.status = UpdateStatus.testing\n build.update.date_testing = func.current_timestamp()\n build.update.request = None\n\n if config.get(\"test_gating.required\"):\n log.debug('Test gating is required, marking the update as waiting on test '\n 'gating and updating it from Greenwave to get the real status.')\n build.update.test_gating_status = TestGatingStatus.waiting\n build.update.update_test_gating_status()\n\n log.info(f\"Update {build.update.display_name} status has been set to testing\")\n", "path": "bodhi/server/consumers/signed.py"}]} | 3,551 | 1,021 |
gh_patches_debug_3628 | rasdani/github-patches | git_diff | coala__coala-1954 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Annotations: handling exceptions
When writing a custom annotation, the user should be able to see the exceptions it raises in order to debug their function. Currently all they get is the information that something went wrong, e.g.
```
[WARNING][14:09:44] The bear ClangComplexityBear cannot be executed. Unable to convert parameter 'max_complexity' into type <function func at 0x7fa1b4d12d90>
```
This can be fixed fairly easily by specifying the exception [here](https://github.com/coala-analyzer/coala/blob/master/coalib/settings/FunctionMetadata.py#L105) and documenting which exceptions annotations are allowed to raise.
This is related to the discussion in https://github.com/coala-analyzer/coala/issues/1219. I think we may need to figure out a better way to use exceptions so that there is no need to catch broad ones, because they can hide bugs or other useful information.
--- END ISSUE ---
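The fix the issue hints at is simply to narrow the bare `except:` in `FunctionMetadata._get_param` so that only conversion-related errors are translated into the friendly message, while anything else a custom annotation raises propagates with its original traceback. Below is a small self-contained sketch of that behaviour; the function and parameter names are illustrative only, not coala's API.
```
def convert_param(annotation, raw_value, param_name="max_complexity"):
    """Convert a raw setting string with a user-supplied annotation.

    Only TypeError/ValueError are treated as conversion failures; any other
    exception raised inside a custom annotation propagates unchanged, so the
    bear author can see the real traceback.
    """
    try:
        return annotation(raw_value)
    except (TypeError, ValueError):
        raise ValueError("Unable to convert parameter {!r} into type "
                         "{}.".format(param_name, annotation))


if __name__ == "__main__":
    print(convert_param(int, "42"))         # prints 42
    try:
        convert_param(int, "not-a-number")  # genuine conversion failure
    except ValueError as error:
        print(error)
```
Catching only `TypeError` and `ValueError` keeps the readable error for genuine conversion problems while letting bugs inside annotations surface, which is also what the patch shown further below does.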
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/settings/FunctionMetadata.py`
Content:
```
1 from collections import OrderedDict
2 from copy import copy
3 from inspect import getfullargspec, ismethod
4
5 from coalib.settings.DocumentationComment import DocumentationComment
6
7
8 class FunctionMetadata:
9 str_nodesc = "No description given."
10 str_optional = "Optional, defaults to '{}'."
11
12 def __init__(self,
13 name,
14 desc="",
15 retval_desc="",
16 non_optional_params=None,
17 optional_params=None,
18 omit=frozenset()):
19 """
20 Creates the FunctionMetadata object.
21
22 :param name: The name of the function.
23 :param desc: The description of the function.
24 :param retval_desc: The retval description of the function.
25 :param non_optional_params: A dict containing the name of non optional
26 parameters as the key and a tuple of a
27 description and the python annotation. To
28 preserve the order, use OrderedDict.
29 :param optional_params: A dict containing the name of optional
30 parameters as the key and a tuple
31 of a description, the python annotation and
32 the default value. To preserve the order,
33 use OrderedDict.
34 :param omit: A set of parameters to omit.
35 """
36 if non_optional_params is None:
37 non_optional_params = OrderedDict()
38 if optional_params is None:
39 optional_params = OrderedDict()
40
41 self.name = name
42 self.desc = desc
43 self.retval_desc = retval_desc
44 self._non_optional_params = non_optional_params
45 self._optional_params = optional_params
46 self.omit = set(omit)
47
48 def _filter_out_omitted(self, params):
49 """
50 Filters out parameters that are to omit. This is a helper method for
51 the param related properties.
52
53 :param params: The parameter dictionary to filter.
54 :return: The filtered dictionary.
55 """
56 return OrderedDict(filter(lambda p: p[0] not in self.omit,
57 tuple(params.items())))
58
59 @property
60 def non_optional_params(self):
61 """
62 Retrieves a dict containing the name of non optional parameters as the
63 key and a tuple of a description and the python annotation. Values that
64 are present in self.omit will be omitted.
65 """
66 return self._filter_out_omitted(self._non_optional_params)
67
68 @property
69 def optional_params(self):
70 """
71 Retrieves a dict containing the name of optional parameters as the key
72 and a tuple of a description, the python annotation and the default
73 value. Values that are present in self.omit will be omitted.
74 """
75 return self._filter_out_omitted(self._optional_params)
76
77 def create_params_from_section(self, section):
78 """
79 Create a params dictionary for this function that holds all values the
80 function needs plus optional ones that are available.
81
82 :param section: The section to retrieve the values from.
83 :return: The params dictionary.
84 """
85 params = {}
86
87 for param in self.non_optional_params:
88 _, annotation = self.non_optional_params[param]
89 params[param] = self._get_param(param, section, annotation)
90
91 for param in self.optional_params:
92 if param in section:
93 _, annotation, _ = self.optional_params[param]
94 params[param] = self._get_param(param, section, annotation)
95
96 return params
97
98 @staticmethod
99 def _get_param(param, section, annotation):
100 if annotation is None:
101 annotation = lambda x: x
102
103 try:
104 return annotation(section[param])
105 except:
106 raise ValueError("Unable to convert parameter {} into type "
107 "{}.".format(repr(param), annotation))
108
109 @classmethod
110 def from_function(cls, func, omit=frozenset()):
111 """
112 Creates a FunctionMetadata object from a function. Please note that any
113 variable argument lists are not supported. If you do not want the
114 first (usual named 'self') argument to appear please pass the method of
115 an actual INSTANCE of a class; passing the method of the class isn't
116 enough. Alternatively you can add "self" to the omit set.
117
118 :param func: The function. If __metadata__ of the unbound function is
119 present it will be copied and used, otherwise it will be
120 generated.
121 :param omit: A set of parameter names that are to be ignored.
122 :return: The FunctionMetadata object corresponding to the given
123 function.
124 """
125 if hasattr(func, "__metadata__"):
126 metadata = copy(func.__metadata__)
127 metadata.omit = omit
128 return metadata
129
130 doc = func.__doc__ or ""
131 doc_comment = DocumentationComment.from_docstring(doc)
132
133 non_optional_params = OrderedDict()
134 optional_params = OrderedDict()
135
136 argspec = getfullargspec(func)
137 args = argspec.args or ()
138 defaults = argspec.defaults or ()
139 num_non_defaults = len(args) - len(defaults)
140 for i, arg in enumerate(args):
141 # Implicit self argument or omitted explicitly
142 if i < 1 and ismethod(func):
143 continue
144
145 if i < num_non_defaults:
146 non_optional_params[arg] = (
147 doc_comment.param_dict.get(arg, cls.str_nodesc),
148 argspec.annotations.get(arg, None))
149 else:
150 optional_params[arg] = (
151 doc_comment.param_dict.get(arg, cls.str_nodesc) + " (" +
152 cls.str_optional.format(str(defaults[i-num_non_defaults]))
153 + ")",
154 argspec.annotations.get(arg, None),
155 defaults[i-num_non_defaults])
156
157 return cls(name=func.__name__,
158 desc=doc_comment.desc,
159 retval_desc=doc_comment.retval_desc,
160 non_optional_params=non_optional_params,
161 optional_params=optional_params,
162 omit=omit)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/settings/FunctionMetadata.py b/coalib/settings/FunctionMetadata.py
--- a/coalib/settings/FunctionMetadata.py
+++ b/coalib/settings/FunctionMetadata.py
@@ -102,7 +102,7 @@
try:
return annotation(section[param])
- except:
+ except (TypeError, ValueError):
raise ValueError("Unable to convert parameter {} into type "
"{}.".format(repr(param), annotation))
| {"golden_diff": "diff --git a/coalib/settings/FunctionMetadata.py b/coalib/settings/FunctionMetadata.py\n--- a/coalib/settings/FunctionMetadata.py\n+++ b/coalib/settings/FunctionMetadata.py\n@@ -102,7 +102,7 @@\n \n try:\n return annotation(section[param])\n- except:\n+ except (TypeError, ValueError):\n raise ValueError(\"Unable to convert parameter {} into type \"\n \"{}.\".format(repr(param), annotation))\n", "issue": "Annotations: handling exceptions\nWhen writing custom annotation, user should be able to access exceptions, that it's raising, to debug his function. Currently all that he gets is the information that something went wrong, e. g.\n\n```\n[WARNING][14:09:44] The bear ClangComplexityBear cannot be executed. Unable to convert parameter 'max_complexity' into type <function func at 0x7fa1b4d12d90>\n```\n\nThis can be pretty easy fixed, by specifying exception [here](https://github.com/coala-analyzer/coala/blob/master/coalib/settings/FunctionMetadata.py#L105) and specifying in the docs what exception should annotations be allowed to raise.\n\nThis is related to https://github.com/coala-analyzer/coala/issues/1219 discussion. I think, maybe we need to figure out a better way to use exceptions so that there will be no need in catching broad ones, because they can hide bugs or other useful information.\n\n", "before_files": [{"content": "from collections import OrderedDict\nfrom copy import copy\nfrom inspect import getfullargspec, ismethod\n\nfrom coalib.settings.DocumentationComment import DocumentationComment\n\n\nclass FunctionMetadata:\n str_nodesc = \"No description given.\"\n str_optional = \"Optional, defaults to '{}'.\"\n\n def __init__(self,\n name,\n desc=\"\",\n retval_desc=\"\",\n non_optional_params=None,\n optional_params=None,\n omit=frozenset()):\n \"\"\"\n Creates the FunctionMetadata object.\n\n :param name: The name of the function.\n :param desc: The description of the function.\n :param retval_desc: The retval description of the function.\n :param non_optional_params: A dict containing the name of non optional\n parameters as the key and a tuple of a\n description and the python annotation. To\n preserve the order, use OrderedDict.\n :param optional_params: A dict containing the name of optional\n parameters as the key and a tuple\n of a description, the python annotation and\n the default value. To preserve the order,\n use OrderedDict.\n :param omit: A set of parameters to omit.\n \"\"\"\n if non_optional_params is None:\n non_optional_params = OrderedDict()\n if optional_params is None:\n optional_params = OrderedDict()\n\n self.name = name\n self.desc = desc\n self.retval_desc = retval_desc\n self._non_optional_params = non_optional_params\n self._optional_params = optional_params\n self.omit = set(omit)\n\n def _filter_out_omitted(self, params):\n \"\"\"\n Filters out parameters that are to omit. This is a helper method for\n the param related properties.\n\n :param params: The parameter dictionary to filter.\n :return: The filtered dictionary.\n \"\"\"\n return OrderedDict(filter(lambda p: p[0] not in self.omit,\n tuple(params.items())))\n\n @property\n def non_optional_params(self):\n \"\"\"\n Retrieves a dict containing the name of non optional parameters as the\n key and a tuple of a description and the python annotation. 
Values that\n are present in self.omit will be omitted.\n \"\"\"\n return self._filter_out_omitted(self._non_optional_params)\n\n @property\n def optional_params(self):\n \"\"\"\n Retrieves a dict containing the name of optional parameters as the key\n and a tuple of a description, the python annotation and the default\n value. Values that are present in self.omit will be omitted.\n \"\"\"\n return self._filter_out_omitted(self._optional_params)\n\n def create_params_from_section(self, section):\n \"\"\"\n Create a params dictionary for this function that holds all values the\n function needs plus optional ones that are available.\n\n :param section: The section to retrieve the values from.\n :return: The params dictionary.\n \"\"\"\n params = {}\n\n for param in self.non_optional_params:\n _, annotation = self.non_optional_params[param]\n params[param] = self._get_param(param, section, annotation)\n\n for param in self.optional_params:\n if param in section:\n _, annotation, _ = self.optional_params[param]\n params[param] = self._get_param(param, section, annotation)\n\n return params\n\n @staticmethod\n def _get_param(param, section, annotation):\n if annotation is None:\n annotation = lambda x: x\n\n try:\n return annotation(section[param])\n except:\n raise ValueError(\"Unable to convert parameter {} into type \"\n \"{}.\".format(repr(param), annotation))\n\n @classmethod\n def from_function(cls, func, omit=frozenset()):\n \"\"\"\n Creates a FunctionMetadata object from a function. Please note that any\n variable argument lists are not supported. If you do not want the\n first (usual named 'self') argument to appear please pass the method of\n an actual INSTANCE of a class; passing the method of the class isn't\n enough. Alternatively you can add \"self\" to the omit set.\n\n :param func: The function. 
If __metadata__ of the unbound function is\n present it will be copied and used, otherwise it will be\n generated.\n :param omit: A set of parameter names that are to be ignored.\n :return: The FunctionMetadata object corresponding to the given\n function.\n \"\"\"\n if hasattr(func, \"__metadata__\"):\n metadata = copy(func.__metadata__)\n metadata.omit = omit\n return metadata\n\n doc = func.__doc__ or \"\"\n doc_comment = DocumentationComment.from_docstring(doc)\n\n non_optional_params = OrderedDict()\n optional_params = OrderedDict()\n\n argspec = getfullargspec(func)\n args = argspec.args or ()\n defaults = argspec.defaults or ()\n num_non_defaults = len(args) - len(defaults)\n for i, arg in enumerate(args):\n # Implicit self argument or omitted explicitly\n if i < 1 and ismethod(func):\n continue\n\n if i < num_non_defaults:\n non_optional_params[arg] = (\n doc_comment.param_dict.get(arg, cls.str_nodesc),\n argspec.annotations.get(arg, None))\n else:\n optional_params[arg] = (\n doc_comment.param_dict.get(arg, cls.str_nodesc) + \" (\" +\n cls.str_optional.format(str(defaults[i-num_non_defaults]))\n + \")\",\n argspec.annotations.get(arg, None),\n defaults[i-num_non_defaults])\n\n return cls(name=func.__name__,\n desc=doc_comment.desc,\n retval_desc=doc_comment.retval_desc,\n non_optional_params=non_optional_params,\n optional_params=optional_params,\n omit=omit)\n", "path": "coalib/settings/FunctionMetadata.py"}], "after_files": [{"content": "from collections import OrderedDict\nfrom copy import copy\nfrom inspect import getfullargspec, ismethod\n\nfrom coalib.settings.DocumentationComment import DocumentationComment\n\n\nclass FunctionMetadata:\n str_nodesc = \"No description given.\"\n str_optional = \"Optional, defaults to '{}'.\"\n\n def __init__(self,\n name,\n desc=\"\",\n retval_desc=\"\",\n non_optional_params=None,\n optional_params=None,\n omit=frozenset()):\n \"\"\"\n Creates the FunctionMetadata object.\n\n :param name: The name of the function.\n :param desc: The description of the function.\n :param retval_desc: The retval description of the function.\n :param non_optional_params: A dict containing the name of non optional\n parameters as the key and a tuple of a\n description and the python annotation. To\n preserve the order, use OrderedDict.\n :param optional_params: A dict containing the name of optional\n parameters as the key and a tuple\n of a description, the python annotation and\n the default value. To preserve the order,\n use OrderedDict.\n :param omit: A set of parameters to omit.\n \"\"\"\n if non_optional_params is None:\n non_optional_params = OrderedDict()\n if optional_params is None:\n optional_params = OrderedDict()\n\n self.name = name\n self.desc = desc\n self.retval_desc = retval_desc\n self._non_optional_params = non_optional_params\n self._optional_params = optional_params\n self.omit = set(omit)\n\n def _filter_out_omitted(self, params):\n \"\"\"\n Filters out parameters that are to omit. This is a helper method for\n the param related properties.\n\n :param params: The parameter dictionary to filter.\n :return: The filtered dictionary.\n \"\"\"\n return OrderedDict(filter(lambda p: p[0] not in self.omit,\n tuple(params.items())))\n\n @property\n def non_optional_params(self):\n \"\"\"\n Retrieves a dict containing the name of non optional parameters as the\n key and a tuple of a description and the python annotation. 
Values that\n are present in self.omit will be omitted.\n \"\"\"\n return self._filter_out_omitted(self._non_optional_params)\n\n @property\n def optional_params(self):\n \"\"\"\n Retrieves a dict containing the name of optional parameters as the key\n and a tuple of a description, the python annotation and the default\n value. Values that are present in self.omit will be omitted.\n \"\"\"\n return self._filter_out_omitted(self._optional_params)\n\n def create_params_from_section(self, section):\n \"\"\"\n Create a params dictionary for this function that holds all values the\n function needs plus optional ones that are available.\n\n :param section: The section to retrieve the values from.\n :return: The params dictionary.\n \"\"\"\n params = {}\n\n for param in self.non_optional_params:\n _, annotation = self.non_optional_params[param]\n params[param] = self._get_param(param, section, annotation)\n\n for param in self.optional_params:\n if param in section:\n _, annotation, _ = self.optional_params[param]\n params[param] = self._get_param(param, section, annotation)\n\n return params\n\n @staticmethod\n def _get_param(param, section, annotation):\n if annotation is None:\n annotation = lambda x: x\n\n try:\n return annotation(section[param])\n except (TypeError, ValueError):\n raise ValueError(\"Unable to convert parameter {} into type \"\n \"{}.\".format(repr(param), annotation))\n\n @classmethod\n def from_function(cls, func, omit=frozenset()):\n \"\"\"\n Creates a FunctionMetadata object from a function. Please note that any\n variable argument lists are not supported. If you do not want the\n first (usual named 'self') argument to appear please pass the method of\n an actual INSTANCE of a class; passing the method of the class isn't\n enough. Alternatively you can add \"self\" to the omit set.\n\n :param func: The function. If __metadata__ of the unbound function is\n present it will be copied and used, otherwise it will be\n generated.\n :param omit: A set of parameter names that are to be ignored.\n :return: The FunctionMetadata object corresponding to the given\n function.\n \"\"\"\n if hasattr(func, \"__metadata__\"):\n metadata = copy(func.__metadata__)\n metadata.omit = omit\n return metadata\n\n doc = func.__doc__ or \"\"\n doc_comment = DocumentationComment.from_docstring(doc)\n\n non_optional_params = OrderedDict()\n optional_params = OrderedDict()\n\n argspec = getfullargspec(func)\n args = argspec.args or ()\n defaults = argspec.defaults or ()\n num_non_defaults = len(args) - len(defaults)\n for i, arg in enumerate(args):\n # Implicit self argument or omitted explicitly\n if i < 1 and ismethod(func):\n continue\n\n if i < num_non_defaults:\n non_optional_params[arg] = (\n doc_comment.param_dict.get(arg, cls.str_nodesc),\n argspec.annotations.get(arg, None))\n else:\n optional_params[arg] = (\n doc_comment.param_dict.get(arg, cls.str_nodesc) + \" (\" +\n cls.str_optional.format(str(defaults[i-num_non_defaults]))\n + \")\",\n argspec.annotations.get(arg, None),\n defaults[i-num_non_defaults])\n\n return cls(name=func.__name__,\n desc=doc_comment.desc,\n retval_desc=doc_comment.retval_desc,\n non_optional_params=non_optional_params,\n optional_params=optional_params,\n omit=omit)\n", "path": "coalib/settings/FunctionMetadata.py"}]} | 2,083 | 102 |
gh_patches_debug_806 | rasdani/github-patches | git_diff | feast-dev__feast-1742 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dependency PyYAML 5.3.* has vulnerability issues
## Expected Behavior
According to [CVE-2020-14343](https://nvd.nist.gov/vuln/detail/CVE-2020-14343):
> A vulnerability was discovered in the PyYAML library in versions before 5.4, where it is susceptible to arbitrary code execution when it processes untrusted YAML files through the full_load method or with the FullLoader loader. Applications that use the library to process untrusted input may be vulnerable to this flaw. This flaw allows an attacker to execute arbitrary code on the system by abusing the python/object/new constructor. This flaw is due to an incomplete fix for CVE-2020-1747. See CVE-2020-14343.
## Current Behavior
The Feast Python SDK requires PyYAML version `5.3.*`.
This not only affects Feast, but also any app depending on it, since dependencies are shared.
## Steps to reproduce
N/A
### Specifications
N/A
## Possible Solution
Bump PyYAML to a ">=5.4" version.
--- END ISSUE ---
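The remedy the issue proposes boils down to one line in the `REQUIRED` list of `sdk/python/setup.py`. A hedged sketch of the relaxed pin is below; the issue only asks for `>=5.4`, so the upper bound here is an assumption rather than something the project mandates.
```
# Sketch of the relaxed requirement for sdk/python/setup.py.
# The "<6.0.0" cap is an assumption; the issue itself only asks for ">=5.4".
REQUIRED = [
    # ... all other dependencies listed below stay unchanged ...
    "PyYAML>=5.4.0,<6.0.0",  # was "PyYAML==5.3.*"
]
```
With the pin relaxed, applications that depend on Feast are no longer held back on the vulnerable 5.3 series.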
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/setup.py`
Content:
```
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import glob
15 import os
16 import re
17 import subprocess
18
19 from distutils.cmd import Command
20 from setuptools import find_packages
21
22 try:
23 from setuptools import setup
24 from setuptools.command.install import install
25 from setuptools.command.develop import develop
26 from setuptools.command.egg_info import egg_info
27 from setuptools.command.sdist import sdist
28 from setuptools.command.build_py import build_py
29 except ImportError:
30 from distutils.core import setup
31 from distutils.command.install import install
32 from distutils.command.build_py import build_py
33
34 NAME = "feast"
35 DESCRIPTION = "Python SDK for Feast"
36 URL = "https://github.com/feast-dev/feast"
37 AUTHOR = "Feast"
38 REQUIRES_PYTHON = ">=3.7.0"
39
40 REQUIRED = [
41 "Click==7.*",
42 "colorama>=0.3.9",
43 "fastavro>=1.1.0",
44 "google-api-core>=1.23.0",
45 "googleapis-common-protos==1.52.*",
46 "grpcio>=1.34.0",
47 "Jinja2>=2.0.0",
48 "jsonschema",
49 "mmh3",
50 "pandas>=1.0.0",
51 "pandavro==1.5.*",
52 "protobuf>=3.10",
53 "pyarrow>=2.0.0",
54 "pydantic>=1.0.0",
55 "PyYAML==5.3.*",
56 "tabulate==0.8.*",
57 "tenacity>=7.*",
58 "toml==0.10.*",
59 "tqdm==4.*",
60 ]
61
62 GCP_REQUIRED = [
63 "google-cloud-bigquery>=2.0.*",
64 "google-cloud-bigquery-storage >= 2.0.0",
65 "google-cloud-datastore>=2.1.*",
66 "google-cloud-storage>=1.34.*",
67 "google-cloud-core==1.4.*",
68 ]
69
70 REDIS_REQUIRED = [
71 "redis-py-cluster==2.1.2",
72 ]
73
74 AWS_REQUIRED = [
75 "boto3==1.17.*",
76 ]
77
78 CI_REQUIRED = [
79 "cryptography==3.3.2",
80 "flake8",
81 "black==19.10b0",
82 "isort>=5",
83 "grpcio-tools==1.34.0",
84 "grpcio-testing==1.34.0",
85 "mock==2.0.0",
86 "moto",
87 "mypy==0.790",
88 "mypy-protobuf==1.24",
89 "avro==1.10.0",
90 "gcsfs",
91 "urllib3>=1.25.4",
92 "pytest==6.0.0",
93 "pytest-cov",
94 "pytest-xdist",
95 "pytest-lazy-fixture==0.6.3",
96 "pytest-timeout==1.4.2",
97 "pytest-ordering==0.6.*",
98 "pytest-mock==1.10.4",
99 "Sphinx!=4.0.0",
100 "sphinx-rtd-theme",
101 "adlfs==0.5.9",
102 "firebase-admin==4.5.2",
103 "pre-commit",
104 "assertpy==1.1",
105 "google-cloud-bigquery>=2.0.*",
106 "google-cloud-bigquery-storage >= 2.0.0",
107 "google-cloud-datastore>=2.1.*",
108 "google-cloud-storage>=1.20.*",
109 "google-cloud-core==1.4.*",
110 "redis-py-cluster==2.1.2",
111 "boto3==1.17.*",
112 ]
113
114
115 # README file from Feast repo root directory
116 repo_root = (
117 subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
118 .communicate()[0]
119 .rstrip()
120 .decode("utf-8")
121 )
122 README_FILE = os.path.join(repo_root, "README.md")
123 with open(README_FILE, "r") as f:
124 LONG_DESCRIPTION = f.read()
125
126 # Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
127 # Regex modified from default tag regex in:
128 # https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
129 TAG_REGEX = re.compile(
130 r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
131 )
132
133
134 class BuildProtoCommand(Command):
135 description = "Builds the proto files into python files."
136
137 def initialize_options(self):
138 self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
139 self.proto_folder = os.path.join(repo_root, "protos")
140 self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
141 self.sub_folders = ["core", "serving", "types", "storage"]
142
143 def finalize_options(self):
144 pass
145
146 def _generate_protos(self, path):
147 proto_files = glob.glob(os.path.join(self.proto_folder, path))
148
149 subprocess.check_call(self.protoc + [
150 '-I', self.proto_folder,
151 '--python_out', self.this_package,
152 '--grpc_python_out', self.this_package,
153 '--mypy_out', self.this_package] + proto_files)
154
155 def run(self):
156 for sub_folder in self.sub_folders:
157 self._generate_protos(f'feast/{sub_folder}/*.proto')
158
159 from pathlib import Path
160
161 for path in Path('feast/protos').rglob('*.py'):
162 for folder in self.sub_folders:
163 # Read in the file
164 with open(path, 'r') as file:
165 filedata = file.read()
166
167 # Replace the target string
168 filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
169
170 # Write the file out again
171 with open(path, 'w') as file:
172 file.write(filedata)
173
174
175 class BuildCommand(build_py):
176 """Custom build command."""
177
178 def run(self):
179 self.run_command('build_proto')
180 build_py.run(self)
181
182
183 class DevelopCommand(develop):
184 """Custom develop command."""
185
186 def run(self):
187 self.run_command('build_proto')
188 develop.run(self)
189
190
191 setup(
192 name=NAME,
193 author=AUTHOR,
194 description=DESCRIPTION,
195 long_description=LONG_DESCRIPTION,
196 long_description_content_type="text/markdown",
197 python_requires=REQUIRES_PYTHON,
198 url=URL,
199 packages=find_packages(exclude=("tests",)),
200 install_requires=REQUIRED,
201 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements
202 # Install dev requirements with: pip install -e .[dev]
203 extras_require={
204 "dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"],
205 "ci": CI_REQUIRED,
206 "gcp": GCP_REQUIRED,
207 "aws": AWS_REQUIRED,
208 "redis": REDIS_REQUIRED,
209 },
210 include_package_data=True,
211 license="Apache",
212 classifiers=[
213 # Trove classifiers
214 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
215 "License :: OSI Approved :: Apache Software License",
216 "Programming Language :: Python",
217 "Programming Language :: Python :: 3",
218 "Programming Language :: Python :: 3.7",
219 ],
220 entry_points={"console_scripts": ["feast=feast.cli:cli"]},
221 use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
222 setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.34.0", "mypy-protobuf", "sphinx!=4.0.0"],
223 package_data={
224 "": [
225 "protos/feast/**/*.proto",
226 "protos/feast/third_party/grpc/health/v1/*.proto",
227 "protos/tensorflow_metadata/proto/v0/*.proto",
228 "feast/protos/feast/**/*.py",
229 "tensorflow_metadata/proto/v0/*.py"
230 ],
231 },
232 cmdclass={
233 "build_proto": BuildProtoCommand,
234 "build_py": BuildCommand,
235 "develop": DevelopCommand,
236 },
237 )
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -52,7 +52,7 @@
"protobuf>=3.10",
"pyarrow>=2.0.0",
"pydantic>=1.0.0",
- "PyYAML==5.3.*",
+ "PyYAML>=5.4.*",
"tabulate==0.8.*",
"tenacity>=7.*",
"toml==0.10.*",
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -52,7 +52,7 @@\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n- \"PyYAML==5.3.*\",\n+ \"PyYAML>=5.4.*\",\n \"tabulate==0.8.*\",\n \"tenacity>=7.*\",\n \"toml==0.10.*\",\n", "issue": "Dependency PyYAML 5.3.* has vulnerability issues\n## Expected Behavior \r\n\r\nAccording to [CVE-2020-14343](https://nvd.nist.gov/vuln/detail/CVE-2020-14343):\r\n\r\n> A vulnerability was discovered in the PyYAML library in versions before 5.4, where it is susceptible to arbitrary code execution when it processes untrusted YAML files through the full_load method or with the FullLoader loader. Applications that use the library to process untrusted input may be vulnerable to this flaw. This flaw allows an attacker to execute arbitrary code on the system by abusing the python/object/new constructor. This flaw is due to an incomplete fix for CVE-2020-1747. See CVE-2020-14343.\r\n\r\n## Current Behavior\r\n\r\nFeast Python SDK requires `PyYAML==5.3.*` version.\r\n\r\nThis not only affects Feast, but also any app depending on it, since dependencies are shared.\r\n\r\n## Steps to reproduce\r\n\r\nN/A\r\n\r\n### Specifications\r\n\r\nN/A\r\n\r\n## Possible Solution\r\n\r\nBump PyYAML to a \">=5.4\" version.\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=1.1.0\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.34.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"tenacity>=7.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nGCP_REQUIRED = [\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.34.*\",\n \"google-cloud-core==1.4.*\",\n]\n\nREDIS_REQUIRED = [\n \"redis-py-cluster==2.1.2\",\n]\n\nAWS_REQUIRED = [\n \"boto3==1.17.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n 
\"grpcio-tools==1.34.0\",\n \"grpcio-testing==1.34.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx!=4.0.0\",\n \"sphinx-rtd-theme\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"redis-py-cluster==2.1.2\",\n \"boto3==1.17.*\",\n]\n\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": 
CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx!=4.0.0\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py"}], "after_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=1.1.0\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.34.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML>=5.4.*\",\n \"tabulate==0.8.*\",\n \"tenacity>=7.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nGCP_REQUIRED = [\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.34.*\",\n \"google-cloud-core==1.4.*\",\n]\n\nREDIS_REQUIRED = [\n \"redis-py-cluster==2.1.2\",\n]\n\nAWS_REQUIRED = [\n \"boto3==1.17.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.34.0\",\n \"grpcio-testing==1.34.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-cov\",\n 
\"pytest-xdist\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx!=4.0.0\",\n \"sphinx-rtd-theme\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"redis-py-cluster==2.1.2\",\n \"boto3==1.17.*\",\n]\n\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: 
https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx!=4.0.0\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py"}]} | 3,148 | 125 |
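As an illustrative aside to the CVE-2020-14343 entry above: the vulnerability is triggered by `yaml.full_load` / `FullLoader` on untrusted input, while `yaml.safe_load` only constructs plain Python types. Below is a minimal, hedged sketch (plain PyYAML, no Feast code involved) of the safer loading pattern plus an optional runtime version guard; the `5.4` threshold is taken from the issue text, not from this repository.

```python
import yaml

# safe_load restricts construction to basic types (dict, list, str, int, ...),
# so the python/object/new constructor abused in CVE-2020-14343 is never used.
untrusted = "name: feast\nversion: 1.0\n"
print(yaml.safe_load(untrusted))  # {'name': 'feast', 'version': 1.0}

# Optional guard for environments that cannot upgrade immediately:
# refuse to run against a PyYAML release older than the patched 5.4 series.
major, minor = (int(part) for part in yaml.__version__.split(".")[:2])
if (major, minor) < (5, 4):
    raise RuntimeError(f"PyYAML {yaml.__version__} is vulnerable; upgrade to >=5.4")
```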
gh_patches_debug_1265 | rasdani/github-patches | git_diff | webkom__lego-1505 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add end_time of an event when getting all events with get request
I want to be able to get the end time of an event when getting all events. I know I can get the end time when getting a specific event, but it is a bit cumbersome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lego/apps/events/serializers/events.py`
Content:
```
1 from django.db import transaction
2 from rest_framework import serializers
3 from rest_framework.fields import BooleanField, CharField
4
5 from lego.apps.comments.serializers import CommentSerializer
6 from lego.apps.companies.fields import CompanyField
7 from lego.apps.companies.models import Company
8 from lego.apps.content.fields import ContentSerializerField
9 from lego.apps.events.constants import PRESENT
10 from lego.apps.events.fields import ActivationTimeField, SpotsLeftField
11 from lego.apps.events.models import Event, Pool
12 from lego.apps.events.serializers.pools import (
13 PoolAdministrateSerializer,
14 PoolCreateAndUpdateSerializer,
15 PoolReadAuthSerializer,
16 PoolReadSerializer,
17 )
18 from lego.apps.events.serializers.registrations import (
19 RegistrationReadDetailedSerializer,
20 RegistrationReadSerializer,
21 )
22 from lego.apps.files.fields import ImageField
23 from lego.apps.tags.serializers import TagSerializerMixin
24 from lego.apps.users.constants import GROUP_GRADE
25 from lego.apps.users.fields import AbakusGroupField
26 from lego.apps.users.models import AbakusGroup
27 from lego.apps.users.serializers.users import PublicUserSerializer
28 from lego.utils.serializers import BasisModelSerializer
29
30
31 class EventPublicSerializer(BasisModelSerializer):
32
33 thumbnail = ImageField(
34 source="cover",
35 required=False,
36 options={"height": 500, "width": 500, "smart": True},
37 )
38
39 class Meta:
40 model = Event
41 fields = ("id", "title", "description", "event_type", "location", "thumbnail")
42 read_only = True
43
44
45 class EventReadSerializer(TagSerializerMixin, BasisModelSerializer):
46 company = CompanyField(queryset=Company.objects.all())
47 cover = ImageField(required=False, options={"height": 500})
48 thumbnail = ImageField(
49 source="cover",
50 required=False,
51 options={"height": 500, "width": 500, "smart": True},
52 )
53 activation_time = ActivationTimeField()
54
55 class Meta:
56 model = Event
57 fields = (
58 "id",
59 "title",
60 "description",
61 "cover",
62 "event_type",
63 "location",
64 "start_time",
65 "thumbnail",
66 "total_capacity",
67 "company",
68 "registration_count",
69 "tags",
70 "activation_time",
71 )
72 read_only = True
73
74
75 class EventReadDetailedSerializer(TagSerializerMixin, BasisModelSerializer):
76 comments = CommentSerializer(read_only=True, many=True)
77 comment_target = CharField(read_only=True)
78 cover = ImageField(required=False, options={"height": 500})
79 company = CompanyField(queryset=Company.objects.all())
80 responsible_group = AbakusGroupField(
81 queryset=AbakusGroup.objects.all(), required=False, allow_null=True
82 )
83 pools = PoolReadSerializer(many=True)
84 active_capacity = serializers.ReadOnlyField()
85 text = ContentSerializerField()
86 created_by = PublicUserSerializer()
87
88 registration_close_time = serializers.DateTimeField(read_only=True)
89
90 class Meta:
91 model = Event
92 fields = (
93 "id",
94 "title",
95 "description",
96 "cover",
97 "text",
98 "event_type",
99 "location",
100 "comments",
101 "comment_target",
102 "start_time",
103 "end_time",
104 "merge_time",
105 "pools",
106 "registration_close_time",
107 "registration_deadline_hours",
108 "unregistration_deadline",
109 "company",
110 "responsible_group",
111 "active_capacity",
112 "feedback_description",
113 "feedback_required",
114 "is_priced",
115 "price_member",
116 "price_guest",
117 "use_stripe",
118 "payment_due_date",
119 "use_captcha",
120 "waiting_registration_count",
121 "tags",
122 "is_merged",
123 "heed_penalties",
124 "created_by",
125 "is_abakom_only",
126 "registration_count",
127 "survey",
128 "use_consent",
129 )
130 read_only = True
131
132
133 class EventForSurveySerializer(EventReadSerializer):
134 attended_count = serializers.SerializerMethodField()
135
136 class Meta:
137 model = Event
138 fields = EventReadSerializer.Meta.fields + (
139 "registration_count",
140 "waiting_registration_count",
141 "attended_count",
142 )
143 read_only = True
144
145 def get_attended_count(self, event):
146 return event.registrations.filter(presence=PRESENT).count()
147
148
149 class EventUserRegSerializer(EventReadSerializer):
150 user_reg = serializers.SerializerMethodField()
151
152 class Meta:
153 model = Event
154 fields = EventReadSerializer.Meta.fields + ("user_reg",)
155 read_only = True
156
157 def get_user_reg(self, event):
158 return RegistrationReadSerializer(event.user_reg[0]).data
159
160
161 class EventReadUserDetailedSerializer(EventReadDetailedSerializer):
162 """ User specfic event serializer that appends data based on request.user """
163
164 activation_time = ActivationTimeField()
165 spots_left = SpotsLeftField()
166 price = serializers.SerializerMethodField()
167
168 class Meta(EventReadDetailedSerializer.Meta):
169 fields = EventReadDetailedSerializer.Meta.fields + (
170 "price",
171 "activation_time",
172 "spots_left",
173 )
174
175 def get_price(self, obj):
176 request = self.context.get("request", None)
177 if request:
178 return obj.get_price(user=request.user)
179
180
181 class EventReadAuthUserDetailedSerializer(EventReadUserDetailedSerializer):
182 pools = PoolReadAuthSerializer(many=True)
183 waiting_registrations = RegistrationReadSerializer(many=True)
184 unanswered_surveys = serializers.SerializerMethodField()
185
186 class Meta(EventReadUserDetailedSerializer.Meta):
187 fields = EventReadUserDetailedSerializer.Meta.fields + (
188 "waiting_registrations",
189 "unanswered_surveys",
190 )
191
192 def get_unanswered_surveys(self, obj):
193 request = self.context.get("request", None)
194 return request.user.unanswered_surveys()
195
196
197 class EventAdministrateSerializer(EventReadSerializer):
198 pools = PoolAdministrateSerializer(many=True)
199 unregistered = RegistrationReadDetailedSerializer(many=True)
200 waiting_registrations = RegistrationReadDetailedSerializer(many=True)
201
202 class Meta(EventReadSerializer.Meta):
203 fields = EventReadSerializer.Meta.fields + (
204 "pools",
205 "unregistered",
206 "waiting_registrations",
207 "use_consent",
208 )
209
210
211 class EventCreateAndUpdateSerializer(TagSerializerMixin, BasisModelSerializer):
212 cover = ImageField(required=False, options={"height": 500})
213 responsible_group = AbakusGroupField(
214 queryset=AbakusGroup.objects.all(), required=False, allow_null=True
215 )
216 pools = PoolCreateAndUpdateSerializer(many=True, required=False)
217 text = ContentSerializerField()
218 is_abakom_only = BooleanField(required=False, default=False)
219
220 registration_close_time = serializers.DateTimeField(read_only=True)
221
222 class Meta:
223 model = Event
224 fields = (
225 "id",
226 "title",
227 "cover",
228 "description",
229 "text",
230 "company",
231 "responsible_group",
232 "feedback_description",
233 "feedback_required",
234 "event_type",
235 "location",
236 "is_priced",
237 "price_member",
238 "price_guest",
239 "use_stripe",
240 "payment_due_date",
241 "start_time",
242 "end_time",
243 "merge_time",
244 "use_captcha",
245 "tags",
246 "pools",
247 "unregistration_deadline",
248 "pinned",
249 "use_consent",
250 "heed_penalties",
251 "is_abakom_only",
252 "registration_deadline_hours",
253 "registration_close_time",
254 )
255
256 def validate(self, data):
257 """
258 Check that start is before finish.
259 """
260 if hasattr(data, "start_time") and hasattr(data, "end_time"):
261 if data["start_time"] > data["end_time"]:
262 raise serializers.ValidationError(
263 {
264 "end_time": "User does not have the required permissions for time travel"
265 }
266 )
267 return data
268
269 def create(self, validated_data):
270 pools = validated_data.pop("pools", [])
271 is_abakom_only = validated_data.pop("is_abakom_only", False)
272 with transaction.atomic():
273 event = super().create(validated_data)
274 for pool in pools:
275 permission_groups = pool.pop("permission_groups")
276 created_pool = Pool.objects.create(event=event, **pool)
277 created_pool.permission_groups.set(permission_groups)
278 event.set_abakom_only(is_abakom_only)
279 return event
280
281 def update(self, instance, validated_data):
282 pools = validated_data.pop("pools", None)
283 is_abakom_only = validated_data.pop("is_abakom_only", False)
284 with transaction.atomic():
285 if pools is not None:
286 existing_pools = list(instance.pools.all().values_list("id", flat=True))
287 for pool in pools:
288 pool_id = pool.get("id", None)
289 if pool_id in existing_pools:
290 existing_pools.remove(pool_id)
291 permission_groups = pool.pop("permission_groups")
292 created_pool = Pool.objects.update_or_create(
293 event=instance,
294 id=pool_id,
295 defaults={
296 "name": pool.get("name"),
297 "capacity": pool.get("capacity", 0),
298 "activation_date": pool.get("activation_date"),
299 },
300 )[0]
301 created_pool.permission_groups.set(permission_groups)
302 for pool_id in existing_pools:
303 Pool.objects.get(id=pool_id).delete()
304 instance.set_abakom_only(is_abakom_only)
305 return super().update(instance, validated_data)
306
307
308 class EventSearchSerializer(serializers.ModelSerializer):
309 cover = ImageField(required=False, options={"height": 500})
310 thumbnail = ImageField(
311 source="cover",
312 required=False,
313 options={"height": 500, "width": 500, "smart": True},
314 )
315 text = ContentSerializerField()
316 activation_time = ActivationTimeField()
317
318 class Meta:
319 model = Event
320 fields = (
321 "id",
322 "title",
323 "description",
324 "cover",
325 "text",
326 "event_type",
327 "location",
328 "start_time",
329 "thumbnail",
330 "end_time",
331 "total_capacity",
332 "company",
333 "registration_count",
334 "tags",
335 "activation_time",
336 "pinned",
337 )
338 read_only = True
339
340
341 def populate_event_registration_users_with_grade(event_dict):
342 """
343 Populates every user in registrations in a serialized event with `grade`.
344 Mainly used in the administrate endpoint
345 :param event_dict:
346 :return:
347 """
348 grades = AbakusGroup.objects.filter(type=GROUP_GRADE).values("id", "name")
349 grade_dict = {item["id"]: item for item in grades}
350 for pool in event_dict.get("pools", []):
351 for registration in pool.get("registrations", []):
352 user = registration.get("user", {})
353 abakus_groups = user.get("abakus_groups", [])
354 user["grade"] = None
355 for id in abakus_groups:
356 grade = grade_dict.get(id, None)
357 if grade:
358 user["grade"] = grade
359 return event_dict
360
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lego/apps/events/serializers/events.py b/lego/apps/events/serializers/events.py
--- a/lego/apps/events/serializers/events.py
+++ b/lego/apps/events/serializers/events.py
@@ -62,6 +62,7 @@
"event_type",
"location",
"start_time",
+ "end_time",
"thumbnail",
"total_capacity",
"company",
| {"golden_diff": "diff --git a/lego/apps/events/serializers/events.py b/lego/apps/events/serializers/events.py\n--- a/lego/apps/events/serializers/events.py\n+++ b/lego/apps/events/serializers/events.py\n@@ -62,6 +62,7 @@\n \"event_type\",\n \"location\",\n \"start_time\",\n+ \"end_time\",\n \"thumbnail\",\n \"total_capacity\",\n \"company\",\n", "issue": "Add end_time of an event when getting all events with get request\nI want to be able to get the end time of an event when getting all events. I know I can get the end time when getting a specific event, but it is a bit cumbersome.\n", "before_files": [{"content": "from django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.fields import BooleanField, CharField\n\nfrom lego.apps.comments.serializers import CommentSerializer\nfrom lego.apps.companies.fields import CompanyField\nfrom lego.apps.companies.models import Company\nfrom lego.apps.content.fields import ContentSerializerField\nfrom lego.apps.events.constants import PRESENT\nfrom lego.apps.events.fields import ActivationTimeField, SpotsLeftField\nfrom lego.apps.events.models import Event, Pool\nfrom lego.apps.events.serializers.pools import (\n PoolAdministrateSerializer,\n PoolCreateAndUpdateSerializer,\n PoolReadAuthSerializer,\n PoolReadSerializer,\n)\nfrom lego.apps.events.serializers.registrations import (\n RegistrationReadDetailedSerializer,\n RegistrationReadSerializer,\n)\nfrom lego.apps.files.fields import ImageField\nfrom lego.apps.tags.serializers import TagSerializerMixin\nfrom lego.apps.users.constants import GROUP_GRADE\nfrom lego.apps.users.fields import AbakusGroupField\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass EventPublicSerializer(BasisModelSerializer):\n\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n\n class Meta:\n model = Event\n fields = (\"id\", \"title\", \"description\", \"event_type\", \"location\", \"thumbnail\")\n read_only = True\n\n\nclass EventReadSerializer(TagSerializerMixin, BasisModelSerializer):\n company = CompanyField(queryset=Company.objects.all())\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"thumbnail\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n )\n read_only = True\n\n\nclass EventReadDetailedSerializer(TagSerializerMixin, BasisModelSerializer):\n comments = CommentSerializer(read_only=True, many=True)\n comment_target = CharField(read_only=True)\n cover = ImageField(required=False, options={\"height\": 500})\n company = CompanyField(queryset=Company.objects.all())\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolReadSerializer(many=True)\n active_capacity = serializers.ReadOnlyField()\n text = ContentSerializerField()\n created_by = PublicUserSerializer()\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n 
\"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"comments\",\n \"comment_target\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"pools\",\n \"registration_close_time\",\n \"registration_deadline_hours\",\n \"unregistration_deadline\",\n \"company\",\n \"responsible_group\",\n \"active_capacity\",\n \"feedback_description\",\n \"feedback_required\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"use_captcha\",\n \"waiting_registration_count\",\n \"tags\",\n \"is_merged\",\n \"heed_penalties\",\n \"created_by\",\n \"is_abakom_only\",\n \"registration_count\",\n \"survey\",\n \"use_consent\",\n )\n read_only = True\n\n\nclass EventForSurveySerializer(EventReadSerializer):\n attended_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\n \"registration_count\",\n \"waiting_registration_count\",\n \"attended_count\",\n )\n read_only = True\n\n def get_attended_count(self, event):\n return event.registrations.filter(presence=PRESENT).count()\n\n\nclass EventUserRegSerializer(EventReadSerializer):\n user_reg = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\"user_reg\",)\n read_only = True\n\n def get_user_reg(self, event):\n return RegistrationReadSerializer(event.user_reg[0]).data\n\n\nclass EventReadUserDetailedSerializer(EventReadDetailedSerializer):\n \"\"\" User specfic event serializer that appends data based on request.user \"\"\"\n\n activation_time = ActivationTimeField()\n spots_left = SpotsLeftField()\n price = serializers.SerializerMethodField()\n\n class Meta(EventReadDetailedSerializer.Meta):\n fields = EventReadDetailedSerializer.Meta.fields + (\n \"price\",\n \"activation_time\",\n \"spots_left\",\n )\n\n def get_price(self, obj):\n request = self.context.get(\"request\", None)\n if request:\n return obj.get_price(user=request.user)\n\n\nclass EventReadAuthUserDetailedSerializer(EventReadUserDetailedSerializer):\n pools = PoolReadAuthSerializer(many=True)\n waiting_registrations = RegistrationReadSerializer(many=True)\n unanswered_surveys = serializers.SerializerMethodField()\n\n class Meta(EventReadUserDetailedSerializer.Meta):\n fields = EventReadUserDetailedSerializer.Meta.fields + (\n \"waiting_registrations\",\n \"unanswered_surveys\",\n )\n\n def get_unanswered_surveys(self, obj):\n request = self.context.get(\"request\", None)\n return request.user.unanswered_surveys()\n\n\nclass EventAdministrateSerializer(EventReadSerializer):\n pools = PoolAdministrateSerializer(many=True)\n unregistered = RegistrationReadDetailedSerializer(many=True)\n waiting_registrations = RegistrationReadDetailedSerializer(many=True)\n\n class Meta(EventReadSerializer.Meta):\n fields = EventReadSerializer.Meta.fields + (\n \"pools\",\n \"unregistered\",\n \"waiting_registrations\",\n \"use_consent\",\n )\n\n\nclass EventCreateAndUpdateSerializer(TagSerializerMixin, BasisModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolCreateAndUpdateSerializer(many=True, required=False)\n text = ContentSerializerField()\n is_abakom_only = BooleanField(required=False, default=False)\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n 
\"cover\",\n \"description\",\n \"text\",\n \"company\",\n \"responsible_group\",\n \"feedback_description\",\n \"feedback_required\",\n \"event_type\",\n \"location\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"use_captcha\",\n \"tags\",\n \"pools\",\n \"unregistration_deadline\",\n \"pinned\",\n \"use_consent\",\n \"heed_penalties\",\n \"is_abakom_only\",\n \"registration_deadline_hours\",\n \"registration_close_time\",\n )\n\n def validate(self, data):\n \"\"\"\n Check that start is before finish.\n \"\"\"\n if hasattr(data, \"start_time\") and hasattr(data, \"end_time\"):\n if data[\"start_time\"] > data[\"end_time\"]:\n raise serializers.ValidationError(\n {\n \"end_time\": \"User does not have the required permissions for time travel\"\n }\n )\n return data\n\n def create(self, validated_data):\n pools = validated_data.pop(\"pools\", [])\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n event = super().create(validated_data)\n for pool in pools:\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.create(event=event, **pool)\n created_pool.permission_groups.set(permission_groups)\n event.set_abakom_only(is_abakom_only)\n return event\n\n def update(self, instance, validated_data):\n pools = validated_data.pop(\"pools\", None)\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n if pools is not None:\n existing_pools = list(instance.pools.all().values_list(\"id\", flat=True))\n for pool in pools:\n pool_id = pool.get(\"id\", None)\n if pool_id in existing_pools:\n existing_pools.remove(pool_id)\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.update_or_create(\n event=instance,\n id=pool_id,\n defaults={\n \"name\": pool.get(\"name\"),\n \"capacity\": pool.get(\"capacity\", 0),\n \"activation_date\": pool.get(\"activation_date\"),\n },\n )[0]\n created_pool.permission_groups.set(permission_groups)\n for pool_id in existing_pools:\n Pool.objects.get(id=pool_id).delete()\n instance.set_abakom_only(is_abakom_only)\n return super().update(instance, validated_data)\n\n\nclass EventSearchSerializer(serializers.ModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n text = ContentSerializerField()\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"thumbnail\",\n \"end_time\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n \"pinned\",\n )\n read_only = True\n\n\ndef populate_event_registration_users_with_grade(event_dict):\n \"\"\"\n Populates every user in registrations in a serialized event with `grade`.\n Mainly used in the administrate endpoint\n :param event_dict:\n :return:\n \"\"\"\n grades = AbakusGroup.objects.filter(type=GROUP_GRADE).values(\"id\", \"name\")\n grade_dict = {item[\"id\"]: item for item in grades}\n for pool in event_dict.get(\"pools\", []):\n for registration in pool.get(\"registrations\", []):\n user = registration.get(\"user\", {})\n abakus_groups = user.get(\"abakus_groups\", [])\n user[\"grade\"] = None\n for id in abakus_groups:\n grade = 
grade_dict.get(id, None)\n if grade:\n user[\"grade\"] = grade\n return event_dict\n", "path": "lego/apps/events/serializers/events.py"}], "after_files": [{"content": "from django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.fields import BooleanField, CharField\n\nfrom lego.apps.comments.serializers import CommentSerializer\nfrom lego.apps.companies.fields import CompanyField\nfrom lego.apps.companies.models import Company\nfrom lego.apps.content.fields import ContentSerializerField\nfrom lego.apps.events.constants import PRESENT\nfrom lego.apps.events.fields import ActivationTimeField, SpotsLeftField\nfrom lego.apps.events.models import Event, Pool\nfrom lego.apps.events.serializers.pools import (\n PoolAdministrateSerializer,\n PoolCreateAndUpdateSerializer,\n PoolReadAuthSerializer,\n PoolReadSerializer,\n)\nfrom lego.apps.events.serializers.registrations import (\n RegistrationReadDetailedSerializer,\n RegistrationReadSerializer,\n)\nfrom lego.apps.files.fields import ImageField\nfrom lego.apps.tags.serializers import TagSerializerMixin\nfrom lego.apps.users.constants import GROUP_GRADE\nfrom lego.apps.users.fields import AbakusGroupField\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass EventPublicSerializer(BasisModelSerializer):\n\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n\n class Meta:\n model = Event\n fields = (\"id\", \"title\", \"description\", \"event_type\", \"location\", \"thumbnail\")\n read_only = True\n\n\nclass EventReadSerializer(TagSerializerMixin, BasisModelSerializer):\n company = CompanyField(queryset=Company.objects.all())\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"end_time\",\n \"thumbnail\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n )\n read_only = True\n\n\nclass EventReadDetailedSerializer(TagSerializerMixin, BasisModelSerializer):\n comments = CommentSerializer(read_only=True, many=True)\n comment_target = CharField(read_only=True)\n cover = ImageField(required=False, options={\"height\": 500})\n company = CompanyField(queryset=Company.objects.all())\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolReadSerializer(many=True)\n active_capacity = serializers.ReadOnlyField()\n text = ContentSerializerField()\n created_by = PublicUserSerializer()\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"comments\",\n \"comment_target\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"pools\",\n \"registration_close_time\",\n \"registration_deadline_hours\",\n \"unregistration_deadline\",\n \"company\",\n \"responsible_group\",\n \"active_capacity\",\n \"feedback_description\",\n \"feedback_required\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n 
\"use_stripe\",\n \"payment_due_date\",\n \"use_captcha\",\n \"waiting_registration_count\",\n \"tags\",\n \"is_merged\",\n \"heed_penalties\",\n \"created_by\",\n \"is_abakom_only\",\n \"registration_count\",\n \"survey\",\n \"use_consent\",\n )\n read_only = True\n\n\nclass EventForSurveySerializer(EventReadSerializer):\n attended_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\n \"registration_count\",\n \"waiting_registration_count\",\n \"attended_count\",\n )\n read_only = True\n\n def get_attended_count(self, event):\n return event.registrations.filter(presence=PRESENT).count()\n\n\nclass EventUserRegSerializer(EventReadSerializer):\n user_reg = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\"user_reg\",)\n read_only = True\n\n def get_user_reg(self, event):\n return RegistrationReadSerializer(event.user_reg[0]).data\n\n\nclass EventReadUserDetailedSerializer(EventReadDetailedSerializer):\n \"\"\" User specfic event serializer that appends data based on request.user \"\"\"\n\n activation_time = ActivationTimeField()\n spots_left = SpotsLeftField()\n price = serializers.SerializerMethodField()\n\n class Meta(EventReadDetailedSerializer.Meta):\n fields = EventReadDetailedSerializer.Meta.fields + (\n \"price\",\n \"activation_time\",\n \"spots_left\",\n )\n\n def get_price(self, obj):\n request = self.context.get(\"request\", None)\n if request:\n return obj.get_price(user=request.user)\n\n\nclass EventReadAuthUserDetailedSerializer(EventReadUserDetailedSerializer):\n pools = PoolReadAuthSerializer(many=True)\n waiting_registrations = RegistrationReadSerializer(many=True)\n unanswered_surveys = serializers.SerializerMethodField()\n\n class Meta(EventReadUserDetailedSerializer.Meta):\n fields = EventReadUserDetailedSerializer.Meta.fields + (\n \"waiting_registrations\",\n \"unanswered_surveys\",\n )\n\n def get_unanswered_surveys(self, obj):\n request = self.context.get(\"request\", None)\n return request.user.unanswered_surveys()\n\n\nclass EventAdministrateSerializer(EventReadSerializer):\n pools = PoolAdministrateSerializer(many=True)\n unregistered = RegistrationReadDetailedSerializer(many=True)\n waiting_registrations = RegistrationReadDetailedSerializer(many=True)\n\n class Meta(EventReadSerializer.Meta):\n fields = EventReadSerializer.Meta.fields + (\n \"pools\",\n \"unregistered\",\n \"waiting_registrations\",\n \"use_consent\",\n )\n\n\nclass EventCreateAndUpdateSerializer(TagSerializerMixin, BasisModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolCreateAndUpdateSerializer(many=True, required=False)\n text = ContentSerializerField()\n is_abakom_only = BooleanField(required=False, default=False)\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"cover\",\n \"description\",\n \"text\",\n \"company\",\n \"responsible_group\",\n \"feedback_description\",\n \"feedback_required\",\n \"event_type\",\n \"location\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"use_captcha\",\n \"tags\",\n \"pools\",\n \"unregistration_deadline\",\n \"pinned\",\n \"use_consent\",\n \"heed_penalties\",\n 
\"is_abakom_only\",\n \"registration_deadline_hours\",\n \"registration_close_time\",\n )\n\n def validate(self, data):\n \"\"\"\n Check that start is before finish.\n \"\"\"\n if hasattr(data, \"start_time\") and hasattr(data, \"end_time\"):\n if data[\"start_time\"] > data[\"end_time\"]:\n raise serializers.ValidationError(\n {\n \"end_time\": \"User does not have the required permissions for time travel\"\n }\n )\n return data\n\n def create(self, validated_data):\n pools = validated_data.pop(\"pools\", [])\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n event = super().create(validated_data)\n for pool in pools:\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.create(event=event, **pool)\n created_pool.permission_groups.set(permission_groups)\n event.set_abakom_only(is_abakom_only)\n return event\n\n def update(self, instance, validated_data):\n pools = validated_data.pop(\"pools\", None)\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n if pools is not None:\n existing_pools = list(instance.pools.all().values_list(\"id\", flat=True))\n for pool in pools:\n pool_id = pool.get(\"id\", None)\n if pool_id in existing_pools:\n existing_pools.remove(pool_id)\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.update_or_create(\n event=instance,\n id=pool_id,\n defaults={\n \"name\": pool.get(\"name\"),\n \"capacity\": pool.get(\"capacity\", 0),\n \"activation_date\": pool.get(\"activation_date\"),\n },\n )[0]\n created_pool.permission_groups.set(permission_groups)\n for pool_id in existing_pools:\n Pool.objects.get(id=pool_id).delete()\n instance.set_abakom_only(is_abakom_only)\n return super().update(instance, validated_data)\n\n\nclass EventSearchSerializer(serializers.ModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n text = ContentSerializerField()\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"thumbnail\",\n \"end_time\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n \"pinned\",\n )\n read_only = True\n\n\ndef populate_event_registration_users_with_grade(event_dict):\n \"\"\"\n Populates every user in registrations in a serialized event with `grade`.\n Mainly used in the administrate endpoint\n :param event_dict:\n :return:\n \"\"\"\n grades = AbakusGroup.objects.filter(type=GROUP_GRADE).values(\"id\", \"name\")\n grade_dict = {item[\"id\"]: item for item in grades}\n for pool in event_dict.get(\"pools\", []):\n for registration in pool.get(\"registrations\", []):\n user = registration.get(\"user\", {})\n abakus_groups = user.get(\"abakus_groups\", [])\n user[\"grade\"] = None\n for id in abakus_groups:\n grade = grade_dict.get(id, None)\n if grade:\n user[\"grade\"] = grade\n return event_dict\n", "path": "lego/apps/events/serializers/events.py"}]} | 3,697 | 97 |
gh_patches_debug_5099 | rasdani/github-patches | git_diff | translate__pootle-6747 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add current character count when entering translations
We're trying to use Pootle to translate metadata strings for an app in the iOS AppStore. The metadata includes individual messages for the app name, subtitle, description, privacy URL and so on and there are different limits on the number of characters allowed in each of them. For instance, an app's name can be no more than 30 characters.
When entering translations, it would be really helpful to see the current number of characters that the translation uses as you type to ensure that you're not exceeding the limit. This could maybe fit on the lower right corner of the input view. You currently have timeline / comment / raw on the left. Current character count could just be a small label that floats to the right on the same line.
# Environment (i.e. 'pootle --version', DB, OS, Browser):
Pootle 2.8.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/core/templatetags/core.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django import template
11 from django.utils.html import escapejs
12 from django.utils.safestring import mark_safe
13
14 from ..utils.json import jsonify
15
16
17 register = template.Library()
18
19
20 @register.filter
21 def to_js(value):
22 """Returns a string which leaves the value readily available for JS
23 consumption.
24 """
25 return mark_safe('JSON.parse("%s")' % escapejs(jsonify(value)))
26
27
28 @register.inclusion_tag('includes/formtable.html')
29 def formtable(formtable):
30 return dict(formtable=formtable)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/core/templatetags/core.py b/pootle/core/templatetags/core.py
--- a/pootle/core/templatetags/core.py
+++ b/pootle/core/templatetags/core.py
@@ -25,6 +25,13 @@
return mark_safe('JSON.parse("%s")' % escapejs(jsonify(value)))
+@register.filter
+def map_to_lengths(value):
+ """Maps a list value by replacing each element with its length.
+ """
+ return [len(e) for e in value]
+
+
@register.inclusion_tag('includes/formtable.html')
def formtable(formtable):
return dict(formtable=formtable)
| {"golden_diff": "diff --git a/pootle/core/templatetags/core.py b/pootle/core/templatetags/core.py\n--- a/pootle/core/templatetags/core.py\n+++ b/pootle/core/templatetags/core.py\n@@ -25,6 +25,13 @@\n return mark_safe('JSON.parse(\"%s\")' % escapejs(jsonify(value)))\n \n \[email protected]\n+def map_to_lengths(value):\n+ \"\"\"Maps a list value by replacing each element with its length.\n+ \"\"\"\n+ return [len(e) for e in value]\n+\n+\n @register.inclusion_tag('includes/formtable.html')\n def formtable(formtable):\n return dict(formtable=formtable)\n", "issue": "Add current character count when entering translations\nWe're trying to use Pootle to translate metadata strings for an app in the iOS AppStore. The metadata includes individual messages for the app name, subtitle, description, privacy URL and so on and there are different limits on the number of characters allowed in each of them. For instance, an app's name can be no more than 30 characters. \r\n\r\nWhen entering translations, it would be really helpful to see the current number of characters that the translation uses as you type to ensure that you're not exceeding the limit. This could maybe fit on the lower right corner of the input view. You currently have timeline / comment / raw on the left. Current character count could just be a small label that floats to the right on the same line.\r\n\r\n# Environment (i.e. 'pootle --version', DB, OS, Browser):\r\n\r\nPootle 2.8.0\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import template\nfrom django.utils.html import escapejs\nfrom django.utils.safestring import mark_safe\n\nfrom ..utils.json import jsonify\n\n\nregister = template.Library()\n\n\[email protected]\ndef to_js(value):\n \"\"\"Returns a string which leaves the value readily available for JS\n consumption.\n \"\"\"\n return mark_safe('JSON.parse(\"%s\")' % escapejs(jsonify(value)))\n\n\[email protected]_tag('includes/formtable.html')\ndef formtable(formtable):\n return dict(formtable=formtable)\n", "path": "pootle/core/templatetags/core.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import template\nfrom django.utils.html import escapejs\nfrom django.utils.safestring import mark_safe\n\nfrom ..utils.json import jsonify\n\n\nregister = template.Library()\n\n\[email protected]\ndef to_js(value):\n \"\"\"Returns a string which leaves the value readily available for JS\n consumption.\n \"\"\"\n return mark_safe('JSON.parse(\"%s\")' % escapejs(jsonify(value)))\n\n\[email protected]\ndef map_to_lengths(value):\n \"\"\"Maps a list value by replacing each element with its length.\n \"\"\"\n return [len(e) for e in value]\n\n\[email protected]_tag('includes/formtable.html')\ndef formtable(formtable):\n return dict(formtable=formtable)\n", "path": "pootle/core/templatetags/core.py"}]} | 697 | 157 |
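For illustration, here is a minimal standalone sketch of what the `map_to_lengths` filter added in the diff above computes (assumption: plain Python without Pootle's template-library registration; in the actual patch the function is exposed to templates via `@register.filter`):

```python
def map_to_lengths(value):
    """Return a list where each element is replaced by its length."""
    return [len(e) for e in value]


if __name__ == "__main__":
    # A template could pair these counts with the corresponding target strings
    # to show a live character count next to each translation field.
    translations = ["Hallo Welt", "Bonjour le monde"]
    print(map_to_lengths(translations))  # [10, 16]
```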
gh_patches_debug_3964 | rasdani/github-patches | git_diff | learningequality__kolibri-11933 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSV Report generation API tests fail when the current date in the active timezone is different to the UTC date
## Observed behavior
When running the tests in `kolibri/plugins/facility/test/test_api.py` 2 tests can sometimes fail - the ones to do with generating a CSV Summary Log or Session Log report.
This occurs when the date as reported on the local machine in the currently set timezone is different to the current date according to UTC. In my specific case, the tests would fail when I ran them after 4pm PST (UTC - 8). I have not tested, but would assume that a similar issue would occur running the tests at say 7am in (UTC + 8) timezone.
## Errors and logs
```
kolibri/plugins/facility/test/test_api.py F..F..... [100%]
==================================================================================================== FAILURES =====================================================================================================
_____________________________________________________________________ ContentSummaryLogCSVExportTestCase.test_csv_download_admin_permissions ______________________________________________________________________
self = <kolibri.plugins.facility.test.test_api.ContentSummaryLogCSVExportTestCase testMethod=test_csv_download_admin_permissions>, mock_enqueue = <MagicMock name='enqueue' id='140268344491728'>
@mock.patch.object(log_exports_cleanup, "enqueue", return_value=None)
def test_csv_download_admin_permissions(self, mock_enqueue):
call_command(
"exportlogs",
log_type="summary",
output_file=output_filename(
"summary",
self.facility,
start_date=self.start_date,
end_date=self.end_date,
),
overwrite=True,
start_date=self.start_date,
end_date=self.end_date,
)
self.client.login(
username=self.admin.username,
password=DUMMY_PASSWORD,
facility=self.facility,
)
response = self.client.get(
reverse(
"kolibri:kolibri.plugins.facility:download_csv_file",
kwargs={"csv_type": "summary", "facility_id": self.facility.id},
)
)
> self.assertEqual(response.status_code, 200)
E AssertionError: 404 != 200
kolibri/plugins/facility/test/test_api.py:149: AssertionError
---------------------------------------------------------------------------------------------- Captured stdout setup ----------------------------------------------------------------------------------------------
Installed 2 object(s) from 1 fixture(s)
---------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------
INFO 2024-03-01 16:58:51,322 Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv
INFO 2024-03-01 16:58:51,327 Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines
INFO 2024-03-01 16:58:51,504 127.0.0.1 - - "GET /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/" 404 0 "" "unknown"
WARNING 2024-03-01 16:58:51,504 Not Found: /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/
------------------------------------------------------------------------------------------------ Captured log call ------------------------------------------------------------------------------------------------
csv_export.py 186 INFO Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv
exportlogs.py 159 INFO Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines
log.py 224 WARNING Not Found: /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/
_____________________________________________________________________ ContentSessionLogCSVExportTestCase.test_csv_download_admin_permissions ______________________________________________________________________
self = <kolibri.plugins.facility.test.test_api.ContentSessionLogCSVExportTestCase testMethod=test_csv_download_admin_permissions>, mock_enqueue = <MagicMock name='enqueue' id='140268351318224'>
@mock.patch.object(log_exports_cleanup, "enqueue", return_value=None)
def test_csv_download_admin_permissions(self, mock_enqueue):
call_command(
"exportlogs",
log_type="session",
output_file=output_filename(
"session",
self.facility,
start_date=self.start_date,
end_date=self.end_date,
),
overwrite=True,
start_date=self.start_date,
end_date=self.end_date,
)
self.client.login(
username=self.admin.username,
password=DUMMY_PASSWORD,
facility=self.facility,
)
response = self.client.get(
reverse(
"kolibri:kolibri.plugins.facility:download_csv_file",
kwargs={"csv_type": "session", "facility_id": self.facility.id},
)
)
> self.assertEqual(response.status_code, 200)
E AssertionError: 404 != 200
kolibri/plugins/facility/test/test_api.py:252: AssertionError
---------------------------------------------------------------------------------------------- Captured stdout setup ----------------------------------------------------------------------------------------------
Installed 2 object(s) from 1 fixture(s)
---------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------
INFO 2024-03-01 16:58:52,079 Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv
INFO 2024-03-01 16:58:52,084 Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines
INFO 2024-03-01 16:58:52,177 127.0.0.1 - - "GET /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/" 404 0 "" "unknown"
WARNING 2024-03-01 16:58:52,177 Not Found: /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/
------------------------------------------------------------------------------------------------ Captured log call ------------------------------------------------------------------------------------------------
csv_export.py 186 INFO Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv
exportlogs.py 159 INFO Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines
log.py 224 WARNING Not Found: /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/
======================================================================================= 2 failed, 7 passed in 13.86 seconds =======================================================================================
```
Note - I did a test of the actual functionality that relies on this in the Facility plugin, and observed no issues, so this seems at the moment to be purely a testing artifact.
## Expected behavior
Tests should pass regardless of time of day or timezone!
## User-facing consequences
I think none - but would be good to work out why this is happening to be sure.
## Steps to reproduce
Set your system timezone to PST.
Set your system time to after 4pm.
Run the test suite above with pytest.
## Context
Noticed in the develop branch, but also extant on release-v0.16.x
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/utils/time_utils.py`
Content:
```
1 from django.utils import timezone
2
3
4 def local_now():
5 return timezone.localtime(timezone.now())
6
7
8 def naive_utc_datetime(dt):
9 return timezone.make_naive(dt, timezone=timezone.utc)
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/utils/time_utils.py b/kolibri/utils/time_utils.py
--- a/kolibri/utils/time_utils.py
+++ b/kolibri/utils/time_utils.py
@@ -2,8 +2,18 @@
def local_now():
+ """
+ Returns the current time in the local timezone.
+ """
return timezone.localtime(timezone.now())
+def utc_now():
+ """
+ Returns the current time in the UTC timezone.
+ """
+ return timezone.now()
+
+
def naive_utc_datetime(dt):
return timezone.make_naive(dt, timezone=timezone.utc)
| {"golden_diff": "diff --git a/kolibri/utils/time_utils.py b/kolibri/utils/time_utils.py\n--- a/kolibri/utils/time_utils.py\n+++ b/kolibri/utils/time_utils.py\n@@ -2,8 +2,18 @@\n \n \n def local_now():\n+ \"\"\"\n+ Returns the current time in the local timezone.\n+ \"\"\"\n return timezone.localtime(timezone.now())\n \n \n+def utc_now():\n+ \"\"\"\n+ Returns the current time in the UTC timezone.\n+ \"\"\"\n+ return timezone.now()\n+\n+\n def naive_utc_datetime(dt):\n return timezone.make_naive(dt, timezone=timezone.utc)\n", "issue": "CSV Report generation API tests fail when the current date in the active timezone is different to the UTC date\n## Observed behavior\r\nWhen running the tests in `kolibri/plugins/facility/test/test_api.py` 2 tests can sometimes fail - the ones to do with generating a CSV Summary Log or Session Log report.\r\n\r\nThis occurs when the date as reported on the local machine in the currently set timezone is different to the current date according to UTC. In my specific case, the tests would fail when I ran them after 4pm PST (UTC - 8). I have not tested, but would assume that a similar issue would occur running the tests at say 7am in (UTC + 8) timezone.\r\n\r\n## Errors and logs\r\n```\r\nkolibri/plugins/facility/test/test_api.py F..F..... [100%]\r\n\r\n==================================================================================================== FAILURES =====================================================================================================\r\n_____________________________________________________________________ ContentSummaryLogCSVExportTestCase.test_csv_download_admin_permissions ______________________________________________________________________\r\n\r\nself = <kolibri.plugins.facility.test.test_api.ContentSummaryLogCSVExportTestCase testMethod=test_csv_download_admin_permissions>, mock_enqueue = <MagicMock name='enqueue' id='140268344491728'>\r\n\r\n @mock.patch.object(log_exports_cleanup, \"enqueue\", return_value=None)\r\n def test_csv_download_admin_permissions(self, mock_enqueue):\r\n call_command(\r\n \"exportlogs\",\r\n log_type=\"summary\",\r\n output_file=output_filename(\r\n \"summary\",\r\n self.facility,\r\n start_date=self.start_date,\r\n end_date=self.end_date,\r\n ),\r\n overwrite=True,\r\n start_date=self.start_date,\r\n end_date=self.end_date,\r\n )\r\n self.client.login(\r\n username=self.admin.username,\r\n password=DUMMY_PASSWORD,\r\n facility=self.facility,\r\n )\r\n response = self.client.get(\r\n reverse(\r\n \"kolibri:kolibri.plugins.facility:download_csv_file\",\r\n kwargs={\"csv_type\": \"summary\", \"facility_id\": self.facility.id},\r\n )\r\n )\r\n> self.assertEqual(response.status_code, 200)\r\nE AssertionError: 404 != 200\r\n\r\nkolibri/plugins/facility/test/test_api.py:149: AssertionError\r\n---------------------------------------------------------------------------------------------- Captured stdout setup ----------------------------------------------------------------------------------------------\r\nInstalled 2 object(s) from 1 fixture(s)\r\n---------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------\r\nINFO 2024-03-01 16:58:51,322 Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv\r\nINFO 2024-03-01 16:58:51,327 Created csv file 
/home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines\r\nINFO 2024-03-01 16:58:51,504 127.0.0.1 - - \"GET /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/\" 404 0 \"\" \"unknown\"\r\nWARNING 2024-03-01 16:58:51,504 Not Found: /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/\r\n------------------------------------------------------------------------------------------------ Captured log call ------------------------------------------------------------------------------------------------\r\ncsv_export.py 186 INFO Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv\r\nexportlogs.py 159 INFO Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines\r\nlog.py 224 WARNING Not Found: /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/\r\n_____________________________________________________________________ ContentSessionLogCSVExportTestCase.test_csv_download_admin_permissions ______________________________________________________________________\r\n\r\nself = <kolibri.plugins.facility.test.test_api.ContentSessionLogCSVExportTestCase testMethod=test_csv_download_admin_permissions>, mock_enqueue = <MagicMock name='enqueue' id='140268351318224'>\r\n\r\n @mock.patch.object(log_exports_cleanup, \"enqueue\", return_value=None)\r\n def test_csv_download_admin_permissions(self, mock_enqueue):\r\n call_command(\r\n \"exportlogs\",\r\n log_type=\"session\",\r\n output_file=output_filename(\r\n \"session\",\r\n self.facility,\r\n start_date=self.start_date,\r\n end_date=self.end_date,\r\n ),\r\n overwrite=True,\r\n start_date=self.start_date,\r\n end_date=self.end_date,\r\n )\r\n self.client.login(\r\n username=self.admin.username,\r\n password=DUMMY_PASSWORD,\r\n facility=self.facility,\r\n )\r\n response = self.client.get(\r\n reverse(\r\n \"kolibri:kolibri.plugins.facility:download_csv_file\",\r\n kwargs={\"csv_type\": \"session\", \"facility_id\": self.facility.id},\r\n )\r\n )\r\n> self.assertEqual(response.status_code, 200)\r\nE AssertionError: 404 != 200\r\n\r\nkolibri/plugins/facility/test/test_api.py:252: AssertionError\r\n---------------------------------------------------------------------------------------------- Captured stdout setup ----------------------------------------------------------------------------------------------\r\nInstalled 2 object(s) from 1 fixture(s)\r\n---------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------\r\nINFO 2024-03-01 16:58:52,079 Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv\r\nINFO 2024-03-01 16:58:52,084 Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines\r\nINFO 2024-03-01 16:58:52,177 127.0.0.1 - - \"GET /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/\" 404 0 \"\" \"unknown\"\r\nWARNING 2024-03-01 16:58:52,177 Not Found: 
/facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/\r\n------------------------------------------------------------------------------------------------ Captured log call ------------------------------------------------------------------------------------------------\r\ncsv_export.py 186 INFO Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv\r\nexportlogs.py 159 INFO Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines\r\nlog.py 224 WARNING Not Found: /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/\r\n======================================================================================= 2 failed, 7 passed in 13.86 seconds =======================================================================================\r\n```\r\n\r\nNote - I did a test of the actual functionality that relies on this in the Facility plugin, and observed no issues, so this seems at the moment to be purely a testing artifact.\r\n\r\n## Expected behavior\r\nTests should pass regardless of time of day or timezone!\r\n\r\n## User-facing consequences\r\nI think none - but would be good to work out why this is happening to be sure.\r\n\r\n## Steps to reproduce\r\nSet your system timezone to PST.\r\nSet your system time to after 4pm.\r\nRun the test suite above with pytest.\r\n\r\n## Context\r\n\r\nNoticed in the develop branch, but also extant on release-v0.16.x\r\n\n", "before_files": [{"content": "from django.utils import timezone\n\n\ndef local_now():\n return timezone.localtime(timezone.now())\n\n\ndef naive_utc_datetime(dt):\n return timezone.make_naive(dt, timezone=timezone.utc)\n", "path": "kolibri/utils/time_utils.py"}], "after_files": [{"content": "from django.utils import timezone\n\n\ndef local_now():\n \"\"\"\n Returns the current time in the local timezone.\n \"\"\"\n return timezone.localtime(timezone.now())\n\n\ndef utc_now():\n \"\"\"\n Returns the current time in the UTC timezone.\n \"\"\"\n return timezone.now()\n\n\ndef naive_utc_datetime(dt):\n return timezone.make_naive(dt, timezone=timezone.utc)\n", "path": "kolibri/utils/time_utils.py"}]} | 2,356 | 131 |
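As a rough illustration of the date mismatch behind these flaky tests, the sketch below compares "today" in UTC with "today" in a UTC-8 zone (assumptions: Python 3.9+ with `zoneinfo`/`tzdata` available; plain stdlib is used here rather than Django's timezone helpers or the `utc_now()` added in the diff):

```python
from datetime import datetime, timezone
from zoneinfo import ZoneInfo

utc_today = datetime.now(timezone.utc).date()
pst_today = datetime.now(ZoneInfo("America/Los_Angeles")).date()

# After 16:00 in UTC-8 the two dates differ, so an export named with the
# local "today" no longer matches a lookup keyed on the UTC date.
print(utc_today, pst_today, utc_today == pst_today)
```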
gh_patches_debug_27424 | rasdani/github-patches | git_diff | learningequality__kolibri-2117 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
can no longer sign in using a pre-existing superuser account after upgrade
It appears that my superuser account is no longer available since upgrading to the latest develop.
I would have expected it to get migrated to an admin account with superuser flags enabled.
It actually looks like the user might still be there:
(screenshot attachment not reproduced here)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/auth/migrations/0004_auto_20170816_1607.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.9.7 on 2017-08-16 23:07
3 from __future__ import unicode_literals
4
5 import django.core.validators
6 from django.db import migrations, models
7 from kolibri.auth.constants.role_kinds import ADMIN
8
9
10 def device_owner_to_super_user(apps, schema_editor):
11 DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')
12 FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')
13 Facility = apps.get_model('kolibriauth', 'Facility')
14 default_facility = Facility.objects.all().first()
15 DevicePermissions = apps.get_model('device', 'DevicePermissions')
16 DeviceSettings = apps.get_model('device', 'DeviceSettings')
17 Role = apps.get_model('kolibriauth', 'Role')
18 from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole
19 real_default_facility = RealFacility.get_default_facility()
20 # Can't do much if no facilities exist, as no facility to FK the users onto
21 if default_facility:
22 for device_owner in DeviceOwner.objects.all():
23 dataset_id = real_default_facility.dataset_id
24 real_superuser = RealFacilityUser(
25 username=device_owner.username,
26 facility=real_default_facility,
27 dataset_id=dataset_id
28 )
29 uuid = real_superuser.calculate_uuid()
30 superuser = FacilityUser.objects.create(
31 username=device_owner.username,
32 password=device_owner.password,
33 facility=default_facility,
34 full_name=device_owner.full_name,
35 date_joined=device_owner.date_joined,
36 id=uuid,
37 dataset_id=dataset_id,
38 _morango_source_id=real_superuser._morango_source_id,
39 _morango_partition=real_superuser._morango_partition,
40 )
41 real_role = RealRole(
42 user=real_superuser,
43 collection=real_default_facility,
44 kind=ADMIN,
45 dataset_id=dataset_id,
46 )
47 role_uuid = real_role.calculate_uuid()
48 role = Role.objects.create(
49 user=superuser,
50 collection=default_facility,
51 kind=ADMIN,
52 id=role_uuid,
53 dataset_id=dataset_id,
54 _morango_source_id=real_role._morango_source_id,
55 _morango_partition=real_role._morango_partition,
56 )
57 DevicePermissions.objects.create(user=superuser, is_superuser=True)
58 # Finally, set the is_provisioned flag
59 settings, created = DeviceSettings.objects.get_or_create(is_provisioned=True)
60
61
62 class Migration(migrations.Migration):
63
64 dependencies = [
65 ('kolibriauth', '0003_auto_20170621_0958'),
66 ('device', '0001_initial')
67 ]
68
69 operations = [
70 migrations.RunPython(device_owner_to_super_user, migrations.RunPython.noop),
71 migrations.DeleteModel(
72 name='DeviceOwner',
73 ),
74 ]
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/auth/migrations/0004_auto_20170816_1607.py b/kolibri/auth/migrations/0004_auto_20170816_1607.py
--- a/kolibri/auth/migrations/0004_auto_20170816_1607.py
+++ b/kolibri/auth/migrations/0004_auto_20170816_1607.py
@@ -8,17 +8,17 @@
def device_owner_to_super_user(apps, schema_editor):
- DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')
- FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')
- Facility = apps.get_model('kolibriauth', 'Facility')
- default_facility = Facility.objects.all().first()
- DevicePermissions = apps.get_model('device', 'DevicePermissions')
- DeviceSettings = apps.get_model('device', 'DeviceSettings')
- Role = apps.get_model('kolibriauth', 'Role')
from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole
real_default_facility = RealFacility.get_default_facility()
# Can't do much if no facilities exist, as no facility to FK the users onto
- if default_facility:
+ if real_default_facility:
+ DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')
+ FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')
+ Facility = apps.get_model('kolibriauth', 'Facility')
+ default_facility = Facility.objects.get(pk=real_default_facility.id)
+ DevicePermissions = apps.get_model('device', 'DevicePermissions')
+ DeviceSettings = apps.get_model('device', 'DeviceSettings')
+ Role = apps.get_model('kolibriauth', 'Role')
for device_owner in DeviceOwner.objects.all():
dataset_id = real_default_facility.dataset_id
real_superuser = RealFacilityUser(
| {"golden_diff": "diff --git a/kolibri/auth/migrations/0004_auto_20170816_1607.py b/kolibri/auth/migrations/0004_auto_20170816_1607.py\n--- a/kolibri/auth/migrations/0004_auto_20170816_1607.py\n+++ b/kolibri/auth/migrations/0004_auto_20170816_1607.py\n@@ -8,17 +8,17 @@\n \n \n def device_owner_to_super_user(apps, schema_editor):\n- DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n- FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n- Facility = apps.get_model('kolibriauth', 'Facility')\n- default_facility = Facility.objects.all().first()\n- DevicePermissions = apps.get_model('device', 'DevicePermissions')\n- DeviceSettings = apps.get_model('device', 'DeviceSettings')\n- Role = apps.get_model('kolibriauth', 'Role')\n from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole\n real_default_facility = RealFacility.get_default_facility()\n # Can't do much if no facilities exist, as no facility to FK the users onto\n- if default_facility:\n+ if real_default_facility:\n+ DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n+ FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n+ Facility = apps.get_model('kolibriauth', 'Facility')\n+ default_facility = Facility.objects.get(pk=real_default_facility.id)\n+ DevicePermissions = apps.get_model('device', 'DevicePermissions')\n+ DeviceSettings = apps.get_model('device', 'DeviceSettings')\n+ Role = apps.get_model('kolibriauth', 'Role')\n for device_owner in DeviceOwner.objects.all():\n dataset_id = real_default_facility.dataset_id\n real_superuser = RealFacilityUser(\n", "issue": "can no longer sign in using a pre-existing superuser account after upgrade\nIt appears that my superuser account is no longer available since upgrading to the latest develop. 
\r\n\r\nI would have expected it to get migrated to an admin account with superuser flags enabled.\r\n\r\nIt actually looks like the user might still be there:\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2017-08-16 23:07\nfrom __future__ import unicode_literals\n\nimport django.core.validators\nfrom django.db import migrations, models\nfrom kolibri.auth.constants.role_kinds import ADMIN\n\n\ndef device_owner_to_super_user(apps, schema_editor):\n DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n Facility = apps.get_model('kolibriauth', 'Facility')\n default_facility = Facility.objects.all().first()\n DevicePermissions = apps.get_model('device', 'DevicePermissions')\n DeviceSettings = apps.get_model('device', 'DeviceSettings')\n Role = apps.get_model('kolibriauth', 'Role')\n from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole\n real_default_facility = RealFacility.get_default_facility()\n # Can't do much if no facilities exist, as no facility to FK the users onto\n if default_facility:\n for device_owner in DeviceOwner.objects.all():\n dataset_id = real_default_facility.dataset_id\n real_superuser = RealFacilityUser(\n username=device_owner.username,\n facility=real_default_facility,\n dataset_id=dataset_id\n )\n uuid = real_superuser.calculate_uuid()\n superuser = FacilityUser.objects.create(\n username=device_owner.username,\n password=device_owner.password,\n facility=default_facility,\n full_name=device_owner.full_name,\n date_joined=device_owner.date_joined,\n id=uuid,\n dataset_id=dataset_id,\n _morango_source_id=real_superuser._morango_source_id,\n _morango_partition=real_superuser._morango_partition,\n )\n real_role = RealRole(\n user=real_superuser,\n collection=real_default_facility,\n kind=ADMIN,\n dataset_id=dataset_id,\n )\n role_uuid = real_role.calculate_uuid()\n role = Role.objects.create(\n user=superuser,\n collection=default_facility,\n kind=ADMIN,\n id=role_uuid,\n dataset_id=dataset_id,\n _morango_source_id=real_role._morango_source_id,\n _morango_partition=real_role._morango_partition,\n )\n DevicePermissions.objects.create(user=superuser, is_superuser=True)\n # Finally, set the is_provisioned flag\n settings, created = DeviceSettings.objects.get_or_create(is_provisioned=True)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('kolibriauth', '0003_auto_20170621_0958'),\n ('device', '0001_initial')\n ]\n\n operations = [\n migrations.RunPython(device_owner_to_super_user, migrations.RunPython.noop),\n migrations.DeleteModel(\n name='DeviceOwner',\n ),\n ]\n", "path": "kolibri/auth/migrations/0004_auto_20170816_1607.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2017-08-16 23:07\nfrom __future__ import unicode_literals\n\nimport django.core.validators\nfrom django.db import migrations, models\nfrom kolibri.auth.constants.role_kinds import ADMIN\n\n\ndef device_owner_to_super_user(apps, schema_editor):\n from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole\n real_default_facility = RealFacility.get_default_facility()\n # Can't do much if no facilities exist, as no facility to FK the users onto\n if real_default_facility:\n DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n Facility = 
apps.get_model('kolibriauth', 'Facility')\n default_facility = Facility.objects.get(pk=real_default_facility.id)\n DevicePermissions = apps.get_model('device', 'DevicePermissions')\n DeviceSettings = apps.get_model('device', 'DeviceSettings')\n Role = apps.get_model('kolibriauth', 'Role')\n for device_owner in DeviceOwner.objects.all():\n dataset_id = real_default_facility.dataset_id\n real_superuser = RealFacilityUser(\n username=device_owner.username,\n facility=real_default_facility,\n dataset_id=dataset_id\n )\n uuid = real_superuser.calculate_uuid()\n superuser = FacilityUser.objects.create(\n username=device_owner.username,\n password=device_owner.password,\n facility=default_facility,\n full_name=device_owner.full_name,\n date_joined=device_owner.date_joined,\n id=uuid,\n dataset_id=dataset_id,\n _morango_source_id=real_superuser._morango_source_id,\n _morango_partition=real_superuser._morango_partition,\n )\n real_role = RealRole(\n user=real_superuser,\n collection=real_default_facility,\n kind=ADMIN,\n dataset_id=dataset_id,\n )\n role_uuid = real_role.calculate_uuid()\n role = Role.objects.create(\n user=superuser,\n collection=default_facility,\n kind=ADMIN,\n id=role_uuid,\n dataset_id=dataset_id,\n _morango_source_id=real_role._morango_source_id,\n _morango_partition=real_role._morango_partition,\n )\n DevicePermissions.objects.create(user=superuser, is_superuser=True)\n # Finally, set the is_provisioned flag\n settings, created = DeviceSettings.objects.get_or_create(is_provisioned=True)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('kolibriauth', '0003_auto_20170621_0958'),\n ('device', '0001_initial')\n ]\n\n operations = [\n migrations.RunPython(device_owner_to_super_user, migrations.RunPython.noop),\n migrations.DeleteModel(\n name='DeviceOwner',\n ),\n ]\n", "path": "kolibri/auth/migrations/0004_auto_20170816_1607.py"}]} | 1,189 | 471 |
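The shape of the fix above — gate on the real default facility, then fetch the matching historical record by primary key instead of taking `.first()` — can be illustrated with a plain-Python stand-in (assumption: lists of dicts in place of Django querysets; this is not the actual migration code):

```python
# Two "facility rows"; a .first()-style pick depends on arbitrary ordering,
# whereas looking up the known default facility's pk is deterministic.
facilities = [
    {"pk": "b2", "name": "Other facility"},
    {"pk": "a1", "name": "Default facility"},
]
real_default_pk = "a1"

first_row = facilities[0]                                                # old lookup
default_row = next(f for f in facilities if f["pk"] == real_default_pk)  # new lookup

print(first_row["name"], "->", default_row["name"])
```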
gh_patches_debug_18591 | rasdani/github-patches | git_diff | StackStorm__st2-4007 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pinned eventlet version has outstanding bugs
Pinned version 0.17 of eventlet has outstanding bugs in its monkey patching of the ssl module.
e.g.
https://github.com/eventlet/eventlet/issues/371
```
# Note: 0.20.0 removed select.poll() on which some of our code and libraries we
# depend on rely
```
@Kami committed this reversion in https://github.com/StackStorm/st2/commit/1ec43d294e6770e56ec8f9990c805cb9dffe98c5
What was the specific issue?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2common/st2common/util/monkey_patch.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 #
15
16 """
17 Module for performing eventlet and other monkey patching.
18 """
19
20 from __future__ import absolute_import
21
22 import sys
23
24 __all__ = [
25 'monkey_patch',
26 'is_use_debugger_flag_provided'
27 ]
28
29 USE_DEBUGGER_FLAG = '--use-debugger'
30 PARENT_ARGS_FLAG = '--parent-args='
31
32
33 def monkey_patch():
34 """
35 Function which performs eventlet monkey patching and also takes into account "--use-debugger"
36 argument in the command line arguments.
37
38 If this argument is found, no monkey patching is performed for the thread module. This allows
39 user to use remote debuggers.
40 """
41 import eventlet
42
43 patch_thread = not is_use_debugger_flag_provided()
44 eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)
45
46
47 def is_use_debugger_flag_provided():
48 # 1. Check sys.argv directly
49 if USE_DEBUGGER_FLAG in sys.argv:
50 return True
51
52 # 2. Check "parent-args" arguments. This is used for spawned processes such as sensors and
53 # Python runner actions
54
55 for arg in sys.argv:
56 if arg.startswith(PARENT_ARGS_FLAG) and USE_DEBUGGER_FLAG in arg:
57 return True
58
59 return False
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/st2common/st2common/util/monkey_patch.py b/st2common/st2common/util/monkey_patch.py
--- a/st2common/st2common/util/monkey_patch.py
+++ b/st2common/st2common/util/monkey_patch.py
@@ -23,6 +23,7 @@
__all__ = [
'monkey_patch',
+ 'use_select_poll_workaround',
'is_use_debugger_flag_provided'
]
@@ -44,6 +45,21 @@
eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)
+def use_select_poll_workaround():
+ """
+ Work around for some tests which injects original select module with select.poll()
+ available to sys.modules.
+ """
+ import sys
+ import subprocess
+ import eventlet
+
+ # Work around to get tests to pass with eventlet >= 0.20.0
+ if 'nose' in sys.modules.keys():
+ sys.modules['select'] = eventlet.patcher.original('select')
+ subprocess.select = eventlet.patcher.original('select')
+
+
def is_use_debugger_flag_provided():
# 1. Check sys.argv directly
if USE_DEBUGGER_FLAG in sys.argv:
| {"golden_diff": "diff --git a/st2common/st2common/util/monkey_patch.py b/st2common/st2common/util/monkey_patch.py\n--- a/st2common/st2common/util/monkey_patch.py\n+++ b/st2common/st2common/util/monkey_patch.py\n@@ -23,6 +23,7 @@\n \n __all__ = [\n 'monkey_patch',\n+ 'use_select_poll_workaround',\n 'is_use_debugger_flag_provided'\n ]\n \n@@ -44,6 +45,21 @@\n eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)\n \n \n+def use_select_poll_workaround():\n+ \"\"\"\n+ Work around for some tests which injects original select module with select.poll()\n+ available to sys.modules.\n+ \"\"\"\n+ import sys\n+ import subprocess\n+ import eventlet\n+\n+ # Work around to get tests to pass with eventlet >= 0.20.0\n+ if 'nose' in sys.modules.keys():\n+ sys.modules['select'] = eventlet.patcher.original('select')\n+ subprocess.select = eventlet.patcher.original('select')\n+\n+\n def is_use_debugger_flag_provided():\n # 1. Check sys.argv directly\n if USE_DEBUGGER_FLAG in sys.argv:\n", "issue": "Pinned eventlet version has outstanding bugs\npinned version 0.17 of eventlet has outstanding bugs on it's monkey patching of the ssl module.\r\n\r\ne.g.\r\nhttps://github.com/eventlet/eventlet/issues/371\r\n\r\n```\r\n# Note: 0.20.0 removed select.poll() on which some of our code and libraries we\r\n# depend on rely\r\n```\r\n\r\n@Kami committed this reversion in https://github.com/StackStorm/st2/commit/1ec43d294e6770e56ec8f9990c805cb9dffe98c5\r\n\r\nWhat was the specific issue?\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n#\n\n\"\"\"\nModule for performing eventlet and other monkey patching.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport sys\n\n__all__ = [\n 'monkey_patch',\n 'is_use_debugger_flag_provided'\n]\n\nUSE_DEBUGGER_FLAG = '--use-debugger'\nPARENT_ARGS_FLAG = '--parent-args='\n\n\ndef monkey_patch():\n \"\"\"\n Function which performs eventlet monkey patching and also takes into account \"--use-debugger\"\n argument in the command line arguments.\n\n If this argument is found, no monkey patching is performed for the thread module. This allows\n user to use remote debuggers.\n \"\"\"\n import eventlet\n\n patch_thread = not is_use_debugger_flag_provided()\n eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)\n\n\ndef is_use_debugger_flag_provided():\n # 1. Check sys.argv directly\n if USE_DEBUGGER_FLAG in sys.argv:\n return True\n\n # 2. Check \"parent-args\" arguments. 
This is used for spawned processes such as sensors and\n # Python runner actions\n\n for arg in sys.argv:\n if arg.startswith(PARENT_ARGS_FLAG) and USE_DEBUGGER_FLAG in arg:\n return True\n\n return False\n", "path": "st2common/st2common/util/monkey_patch.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n#\n\n\"\"\"\nModule for performing eventlet and other monkey patching.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport sys\n\n__all__ = [\n 'monkey_patch',\n 'use_select_poll_workaround',\n 'is_use_debugger_flag_provided'\n]\n\nUSE_DEBUGGER_FLAG = '--use-debugger'\nPARENT_ARGS_FLAG = '--parent-args='\n\n\ndef monkey_patch():\n \"\"\"\n Function which performs eventlet monkey patching and also takes into account \"--use-debugger\"\n argument in the command line arguments.\n\n If this argument is found, no monkey patching is performed for the thread module. This allows\n user to use remote debuggers.\n \"\"\"\n import eventlet\n\n patch_thread = not is_use_debugger_flag_provided()\n eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)\n\n\ndef use_select_poll_workaround():\n \"\"\"\n Work around for some tests which injects original select module with select.poll()\n available to sys.modules.\n \"\"\"\n import sys\n import subprocess\n import eventlet\n\n # Work around to get tests to pass with eventlet >= 0.20.0\n if 'nose' in sys.modules.keys():\n sys.modules['select'] = eventlet.patcher.original('select')\n subprocess.select = eventlet.patcher.original('select')\n\n\ndef is_use_debugger_flag_provided():\n # 1. Check sys.argv directly\n if USE_DEBUGGER_FLAG in sys.argv:\n return True\n\n # 2. Check \"parent-args\" arguments. This is used for spawned processes such as sensors and\n # Python runner actions\n\n for arg in sys.argv:\n if arg.startswith(PARENT_ARGS_FLAG) and USE_DEBUGGER_FLAG in arg:\n return True\n\n return False\n", "path": "st2common/st2common/util/monkey_patch.py"}]} | 966 | 286 |
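A minimal sketch of the mechanism the new `use_select_poll_workaround()` relies on (assumption: eventlet is installed; this demonstrates only `eventlet.patcher.original()` and the `sys.modules` swap, not the nose-detection logic from the patch):

```python
import sys

import eventlet

# Recover the unpatched stdlib module; unlike eventlet's green "select" on
# eventlet >= 0.20, it still exposes poll() on platforms that provide it.
original_select = eventlet.patcher.original("select")
print(hasattr(original_select, "poll"))

# The workaround then re-registers it so later imports of "select" see poll().
sys.modules["select"] = original_select
```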
gh_patches_debug_8508 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1304 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
capture_exception raises exception in UUID __repr__
**Describe the bug**: When capturing exceptions, if the exception is because of a raise in UUID `__init__`, apm agent itself raises with exception:
```
self = <[AttributeError('int') raised in repr()] UUID object at 0x12508cd00>
def __str__(self):
> hex = '%032x' % self.int
E AttributeError: int
```
**To Reproduce**
1. Add this test to `exception_tests.py`:
```python
def test_fail_on_uuid_raise(elasticapm_client):
def generate_uuid():
from uuid import UUID
return UUID("INVALID")
try:
generate_uuid()
except Exception:
elasticapm_client.capture_exception()
```
**Environment (please complete the following information)**
- OS: MacOS / Linux
- Python version: 3.8.11
- Framework and version [e.g. Django 2.1]: Custom / Starlette
- APM Server version: N/A
- Agent version: 6.3.3
**Additional context**
Add any other context about the problem here.
`stacktrace`:
```
../../elasticapm/base.py:272: in capture_exception
return self.capture("Exception", exc_info=exc_info, handled=handled, **kwargs)
../../elasticapm/base.py:241: in capture
data = self._build_msg_for_logging(
../../elasticapm/base.py:469: in _build_msg_for_logging
result = handler.capture(self, **kwargs)
../../elasticapm/events.py:96: in capture
frames = get_stack_info(
../../elasticapm/utils/stacks.py:325: in get_stack_info
result = get_frame_info(
../../elasticapm/utils/stacks.py:294: in get_frame_info
f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}
../../elasticapm/utils/stacks.py:294: in <dictcomp>
f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}
../../elasticapm/events.py:103: in <lambda>
locals_processor_func=lambda local_var: varmap(
../../elasticapm/utils/__init__.py:71: in varmap
ret = func(name, var, **kwargs)
../../elasticapm/events.py:104: in <lambda>
lambda k, val: shorten(
../../elasticapm/utils/encoding.py:205: in shorten
var = transform(var)
../../elasticapm/utils/encoding.py:132: in transform
ret = repr(value)
/usr/local/Cellar/[email protected]/3.8.11/Frameworks/Python.framework/Versions/3.8/lib/python3.8/uuid.py:268: in __repr__
return '%s(%r)' % (self.__class__.__name__, str(self))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/utils/encoding.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # BSD 3-Clause License
4 #
5 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
6 # Copyright (c) 2019, Elasticsearch BV
7 # All rights reserved.
8 #
9 # Redistribution and use in source and binary forms, with or without
10 # modification, are permitted provided that the following conditions are met:
11 #
12 # * Redistributions of source code must retain the above copyright notice, this
13 # list of conditions and the following disclaimer.
14 #
15 # * Redistributions in binary form must reproduce the above copyright notice,
16 # this list of conditions and the following disclaimer in the documentation
17 # and/or other materials provided with the distribution.
18 #
19 # * Neither the name of the copyright holder nor the names of its
20 # contributors may be used to endorse or promote products derived from
21 # this software without specific prior written permission.
22 #
23 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
27 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32
33
34 import datetime
35 import itertools
36 import uuid
37 from decimal import Decimal
38
39 from elasticapm.conf.constants import KEYWORD_MAX_LENGTH, LABEL_RE, LABEL_TYPES
40 from elasticapm.utils import compat
41
42 PROTECTED_TYPES = compat.integer_types + (type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time)
43
44
45 def is_protected_type(obj):
46 """Determine if the object instance is of a protected type.
47
48 Objects of protected types are preserved as-is when passed to
49 force_text(strings_only=True).
50 """
51 return isinstance(obj, PROTECTED_TYPES)
52
53
54 def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
55 """
56 Similar to smart_text, except that lazy instances are resolved to
57 strings, rather than kept as lazy objects.
58
59 If strings_only is True, don't convert (some) non-string-like objects.
60 """
61 # Handle the common case first, saves 30-40% when s is an instance of
62 # compat.text_type. This function gets called often in that setting.
63 #
64 # Adapted from Django
65 if isinstance(s, compat.text_type):
66 return s
67 if strings_only and is_protected_type(s):
68 return s
69 try:
70 if not isinstance(s, compat.string_types):
71 if hasattr(s, "__unicode__"):
72 s = s.__unicode__()
73 else:
74 if compat.PY3:
75 if isinstance(s, bytes):
76 s = compat.text_type(s, encoding, errors)
77 else:
78 s = compat.text_type(s)
79 else:
80 s = compat.text_type(bytes(s), encoding, errors)
81 else:
82 # Note: We use .decode() here, instead of compat.text_type(s, encoding,
83 # errors), so that if s is a SafeBytes, it ends up being a
84 # SafeText at the end.
85 s = s.decode(encoding, errors)
86 except UnicodeDecodeError as e:
87 if not isinstance(s, Exception):
88 raise UnicodeDecodeError(*e.args)
89 else:
90 # If we get to here, the caller has passed in an Exception
91 # subclass populated with non-ASCII bytestring data without a
92 # working unicode method. Try to handle this without raising a
93 # further exception by individually forcing the exception args
94 # to unicode.
95 s = " ".join([force_text(arg, encoding, strings_only, errors) for arg in s])
96 return s
97
98
99 def _has_elasticapm_metadata(value):
100 try:
101 return callable(value.__getattribute__("__elasticapm__"))
102 except Exception:
103 return False
104
105
106 def transform(value, stack=None, context=None):
107 # TODO: make this extendable
108 if context is None:
109 context = {}
110 if stack is None:
111 stack = []
112
113 objid = id(value)
114 if objid in context:
115 return "<...>"
116
117 context[objid] = 1
118 transform_rec = lambda o: transform(o, stack + [value], context)
119
120 if any(value is s for s in stack):
121 ret = "cycle"
122 elif isinstance(value, (tuple, list, set, frozenset)):
123 try:
124 ret = type(value)(transform_rec(o) for o in value)
125 except Exception:
126 # We may be dealing with a namedtuple
127 class value_type(list):
128 __name__ = type(value).__name__
129
130 ret = value_type(transform_rec(o) for o in value)
131 elif isinstance(value, uuid.UUID):
132 ret = repr(value)
133 elif isinstance(value, dict):
134 ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))
135 elif isinstance(value, compat.text_type):
136 ret = to_unicode(value)
137 elif isinstance(value, compat.binary_type):
138 ret = to_string(value)
139 elif not isinstance(value, compat.class_types) and _has_elasticapm_metadata(value):
140 ret = transform_rec(value.__elasticapm__())
141 elif isinstance(value, bool):
142 ret = bool(value)
143 elif isinstance(value, float):
144 ret = float(value)
145 elif isinstance(value, int):
146 ret = int(value)
147 elif compat.PY2 and isinstance(value, long): # noqa F821
148 ret = long(value) # noqa F821
149 elif value is not None:
150 try:
151 ret = transform(repr(value))
152 except Exception:
153 # It's common case that a model's __unicode__ definition may try to query the database
154 # which if it was not cleaned up correctly, would hit a transaction aborted exception
155 ret = u"<BadRepr: %s>" % type(value)
156 else:
157 ret = None
158 del context[objid]
159 return ret
160
161
162 def to_unicode(value):
163 try:
164 value = compat.text_type(force_text(value))
165 except (UnicodeEncodeError, UnicodeDecodeError):
166 value = "(Error decoding value)"
167 except Exception: # in some cases we get a different exception
168 try:
169 value = compat.binary_type(repr(type(value)))
170 except Exception:
171 value = "(Error decoding value)"
172 return value
173
174
175 def to_string(value):
176 try:
177 return compat.binary_type(value.decode("utf-8").encode("utf-8"))
178 except Exception:
179 return to_unicode(value).encode("utf-8")
180
181
182 def shorten(var, list_length=50, string_length=200, dict_length=50, **kwargs):
183 """
184 Shorten a given variable based on configurable maximum lengths, leaving
185 breadcrumbs in the object to show that it was shortened.
186
187 For strings, truncate the string to the max length, and append "..." so
188 the user knows data was lost.
189
190 For lists, truncate the list to the max length, and append two new strings
191 to the list: "..." and "(<x> more elements)" where <x> is the number of
192 elements removed.
193
194 For dicts, truncate the dict to the max length (based on number of key/value
195 pairs) and add a new (key, value) pair to the dict:
196 ("...", "(<x> more elements)") where <x> is the number of key/value pairs
197 removed.
198
199 :param var: Variable to be shortened
200 :param list_length: Max length (in items) of lists
201 :param string_length: Max length (in characters) of strings
202 :param dict_length: Max length (in key/value pairs) of dicts
203 :return: Shortened variable
204 """
205 var = transform(var)
206 if isinstance(var, compat.string_types) and len(var) > string_length:
207 var = var[: string_length - 3] + "..."
208 elif isinstance(var, (list, tuple, set, frozenset)) and len(var) > list_length:
209 # TODO: we should write a real API for storing some metadata with vars when
210 # we get around to doing ref storage
211 var = list(var)[:list_length] + ["...", "(%d more elements)" % (len(var) - list_length,)]
212 elif isinstance(var, dict) and len(var) > dict_length:
213 trimmed_tuples = [(k, v) for (k, v) in itertools.islice(compat.iteritems(var), dict_length)]
214 if "<truncated>" not in var:
215 trimmed_tuples += [("<truncated>", "(%d more elements)" % (len(var) - dict_length))]
216 var = dict(trimmed_tuples)
217 return var
218
219
220 def keyword_field(string):
221 """
222 If the given string is longer than KEYWORD_MAX_LENGTH, truncate it to
223 KEYWORD_MAX_LENGTH-1, adding the "…" character at the end.
224 """
225 if not isinstance(string, compat.string_types) or len(string) <= KEYWORD_MAX_LENGTH:
226 return string
227 return string[: KEYWORD_MAX_LENGTH - 1] + u"…"
228
229
230 def enforce_label_format(labels):
231 """
232 Enforces label format:
233 * dots, double quotes or stars in keys are replaced by underscores
234 * string values are limited to a length of 1024 characters
235 * values can only be of a limited set of types
236
237 :param labels: a dictionary of labels
238 :return: a new dictionary with sanitized keys/values
239 """
240 new = {}
241 for key, value in compat.iteritems(labels):
242 if not isinstance(value, LABEL_TYPES):
243 value = keyword_field(compat.text_type(value))
244 new[LABEL_RE.sub("_", compat.text_type(key))] = value
245 return new
246
```
--- END FILES ---
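For context on the branch patched below (`ret = repr(value)` for `uuid.UUID` inside `transform`): the record's issue describes `capture_exception` failing because a UUID whose `__init__` raised never received its `int` attribute, so `repr()` itself raises `AttributeError`. A minimal, hedged reproduction of that failure mode — the `__new__` call merely stands in for the half-initialized local the agent walks over while collecting frame variables:

```python
import uuid

# A UUID that failed during __init__ never gets its `int` attribute, so repr()
# (which formats self.int) raises AttributeError; __new__ only fakes such an
# instance for demonstration purposes.
broken = uuid.UUID.__new__(uuid.UUID)
try:
    repr(broken)
except AttributeError as exc:
    print("repr() failed:", exc)
```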
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/utils/encoding.py b/elasticapm/utils/encoding.py
--- a/elasticapm/utils/encoding.py
+++ b/elasticapm/utils/encoding.py
@@ -129,7 +129,10 @@
ret = value_type(transform_rec(o) for o in value)
elif isinstance(value, uuid.UUID):
- ret = repr(value)
+ try:
+ ret = repr(value)
+ except AttributeError:
+ ret = None
elif isinstance(value, dict):
ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))
elif isinstance(value, compat.text_type):
| {"golden_diff": "diff --git a/elasticapm/utils/encoding.py b/elasticapm/utils/encoding.py\n--- a/elasticapm/utils/encoding.py\n+++ b/elasticapm/utils/encoding.py\n@@ -129,7 +129,10 @@\n \n ret = value_type(transform_rec(o) for o in value)\n elif isinstance(value, uuid.UUID):\n- ret = repr(value)\n+ try:\n+ ret = repr(value)\n+ except AttributeError:\n+ ret = None\n elif isinstance(value, dict):\n ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))\n elif isinstance(value, compat.text_type):\n", "issue": "capture_exception raises exception in UUID __repr__\n**Describe the bug**: When capturing exceptions, if the exception is because of a raise in UUID `__init__`, apm agent itself raises with exception:\r\n\r\n```\r\nself = <[AttributeError('int') raised in repr()] UUID object at 0x12508cd00>\r\n\r\n def __str__(self):\r\n> hex = '%032x' % self.int\r\nE AttributeError: int\r\n```\r\n\r\n**To Reproduce**\r\n\r\n1. Add this test to `exception_tests.py`:\r\n```python\r\ndef test_fail_on_uuid_raise(elasticapm_client):\r\n def generate_uuid():\r\n from uuid import UUID\r\n return UUID(\"INVALID\")\r\n\r\n try:\r\n generate_uuid()\r\n except Exception:\r\n elasticapm_client.capture_exception()\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: MacOS / Linux\r\n- Python version: 3.8.11\r\n- Framework and version [e.g. Django 2.1]: Custom / Starlette\r\n- APM Server version: N/A\r\n- Agent version: 6.3.3\r\n\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n`stacktrace`:\r\n```\r\n../../elasticapm/base.py:272: in capture_exception\r\n return self.capture(\"Exception\", exc_info=exc_info, handled=handled, **kwargs)\r\n../../elasticapm/base.py:241: in capture\r\n data = self._build_msg_for_logging(\r\n../../elasticapm/base.py:469: in _build_msg_for_logging\r\n result = handler.capture(self, **kwargs)\r\n../../elasticapm/events.py:96: in capture\r\n frames = get_stack_info(\r\n../../elasticapm/utils/stacks.py:325: in get_stack_info\r\n result = get_frame_info(\r\n../../elasticapm/utils/stacks.py:294: in get_frame_info\r\n f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}\r\n../../elasticapm/utils/stacks.py:294: in <dictcomp>\r\n f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}\r\n../../elasticapm/events.py:103: in <lambda>\r\n locals_processor_func=lambda local_var: varmap(\r\n../../elasticapm/utils/__init__.py:71: in varmap\r\n ret = func(name, var, **kwargs)\r\n../../elasticapm/events.py:104: in <lambda>\r\n lambda k, val: shorten(\r\n../../elasticapm/utils/encoding.py:205: in shorten\r\n var = transform(var)\r\n../../elasticapm/utils/encoding.py:132: in transform\r\n ret = repr(value)\r\n/usr/local/Cellar/[email protected]/3.8.11/Frameworks/Python.framework/Versions/3.8/lib/python3.8/uuid.py:268: in __repr__\r\n return '%s(%r)' % (self.__class__.__name__, str(self))\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in 
binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport datetime\nimport itertools\nimport uuid\nfrom decimal import Decimal\n\nfrom elasticapm.conf.constants import KEYWORD_MAX_LENGTH, LABEL_RE, LABEL_TYPES\nfrom elasticapm.utils import compat\n\nPROTECTED_TYPES = compat.integer_types + (type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time)\n\n\ndef is_protected_type(obj):\n \"\"\"Determine if the object instance is of a protected type.\n\n Objects of protected types are preserved as-is when passed to\n force_text(strings_only=True).\n \"\"\"\n return isinstance(obj, PROTECTED_TYPES)\n\n\ndef force_text(s, encoding=\"utf-8\", strings_only=False, errors=\"strict\"):\n \"\"\"\n Similar to smart_text, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n\n If strings_only is True, don't convert (some) non-string-like objects.\n \"\"\"\n # Handle the common case first, saves 30-40% when s is an instance of\n # compat.text_type. This function gets called often in that setting.\n #\n # Adapted from Django\n if isinstance(s, compat.text_type):\n return s\n if strings_only and is_protected_type(s):\n return s\n try:\n if not isinstance(s, compat.string_types):\n if hasattr(s, \"__unicode__\"):\n s = s.__unicode__()\n else:\n if compat.PY3:\n if isinstance(s, bytes):\n s = compat.text_type(s, encoding, errors)\n else:\n s = compat.text_type(s)\n else:\n s = compat.text_type(bytes(s), encoding, errors)\n else:\n # Note: We use .decode() here, instead of compat.text_type(s, encoding,\n # errors), so that if s is a SafeBytes, it ends up being a\n # SafeText at the end.\n s = s.decode(encoding, errors)\n except UnicodeDecodeError as e:\n if not isinstance(s, Exception):\n raise UnicodeDecodeError(*e.args)\n else:\n # If we get to here, the caller has passed in an Exception\n # subclass populated with non-ASCII bytestring data without a\n # working unicode method. 
Try to handle this without raising a\n # further exception by individually forcing the exception args\n # to unicode.\n s = \" \".join([force_text(arg, encoding, strings_only, errors) for arg in s])\n return s\n\n\ndef _has_elasticapm_metadata(value):\n try:\n return callable(value.__getattribute__(\"__elasticapm__\"))\n except Exception:\n return False\n\n\ndef transform(value, stack=None, context=None):\n # TODO: make this extendable\n if context is None:\n context = {}\n if stack is None:\n stack = []\n\n objid = id(value)\n if objid in context:\n return \"<...>\"\n\n context[objid] = 1\n transform_rec = lambda o: transform(o, stack + [value], context)\n\n if any(value is s for s in stack):\n ret = \"cycle\"\n elif isinstance(value, (tuple, list, set, frozenset)):\n try:\n ret = type(value)(transform_rec(o) for o in value)\n except Exception:\n # We may be dealing with a namedtuple\n class value_type(list):\n __name__ = type(value).__name__\n\n ret = value_type(transform_rec(o) for o in value)\n elif isinstance(value, uuid.UUID):\n ret = repr(value)\n elif isinstance(value, dict):\n ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))\n elif isinstance(value, compat.text_type):\n ret = to_unicode(value)\n elif isinstance(value, compat.binary_type):\n ret = to_string(value)\n elif not isinstance(value, compat.class_types) and _has_elasticapm_metadata(value):\n ret = transform_rec(value.__elasticapm__())\n elif isinstance(value, bool):\n ret = bool(value)\n elif isinstance(value, float):\n ret = float(value)\n elif isinstance(value, int):\n ret = int(value)\n elif compat.PY2 and isinstance(value, long): # noqa F821\n ret = long(value) # noqa F821\n elif value is not None:\n try:\n ret = transform(repr(value))\n except Exception:\n # It's common case that a model's __unicode__ definition may try to query the database\n # which if it was not cleaned up correctly, would hit a transaction aborted exception\n ret = u\"<BadRepr: %s>\" % type(value)\n else:\n ret = None\n del context[objid]\n return ret\n\n\ndef to_unicode(value):\n try:\n value = compat.text_type(force_text(value))\n except (UnicodeEncodeError, UnicodeDecodeError):\n value = \"(Error decoding value)\"\n except Exception: # in some cases we get a different exception\n try:\n value = compat.binary_type(repr(type(value)))\n except Exception:\n value = \"(Error decoding value)\"\n return value\n\n\ndef to_string(value):\n try:\n return compat.binary_type(value.decode(\"utf-8\").encode(\"utf-8\"))\n except Exception:\n return to_unicode(value).encode(\"utf-8\")\n\n\ndef shorten(var, list_length=50, string_length=200, dict_length=50, **kwargs):\n \"\"\"\n Shorten a given variable based on configurable maximum lengths, leaving\n breadcrumbs in the object to show that it was shortened.\n\n For strings, truncate the string to the max length, and append \"...\" so\n the user knows data was lost.\n\n For lists, truncate the list to the max length, and append two new strings\n to the list: \"...\" and \"(<x> more elements)\" where <x> is the number of\n elements removed.\n\n For dicts, truncate the dict to the max length (based on number of key/value\n pairs) and add a new (key, value) pair to the dict:\n (\"...\", \"(<x> more elements)\") where <x> is the number of key/value pairs\n removed.\n\n :param var: Variable to be shortened\n :param list_length: Max length (in items) of lists\n :param string_length: Max length (in characters) of strings\n :param dict_length: Max length (in key/value pairs) of dicts\n 
:return: Shortened variable\n \"\"\"\n var = transform(var)\n if isinstance(var, compat.string_types) and len(var) > string_length:\n var = var[: string_length - 3] + \"...\"\n elif isinstance(var, (list, tuple, set, frozenset)) and len(var) > list_length:\n # TODO: we should write a real API for storing some metadata with vars when\n # we get around to doing ref storage\n var = list(var)[:list_length] + [\"...\", \"(%d more elements)\" % (len(var) - list_length,)]\n elif isinstance(var, dict) and len(var) > dict_length:\n trimmed_tuples = [(k, v) for (k, v) in itertools.islice(compat.iteritems(var), dict_length)]\n if \"<truncated>\" not in var:\n trimmed_tuples += [(\"<truncated>\", \"(%d more elements)\" % (len(var) - dict_length))]\n var = dict(trimmed_tuples)\n return var\n\n\ndef keyword_field(string):\n \"\"\"\n If the given string is longer than KEYWORD_MAX_LENGTH, truncate it to\n KEYWORD_MAX_LENGTH-1, adding the \"\u2026\" character at the end.\n \"\"\"\n if not isinstance(string, compat.string_types) or len(string) <= KEYWORD_MAX_LENGTH:\n return string\n return string[: KEYWORD_MAX_LENGTH - 1] + u\"\u2026\"\n\n\ndef enforce_label_format(labels):\n \"\"\"\n Enforces label format:\n * dots, double quotes or stars in keys are replaced by underscores\n * string values are limited to a length of 1024 characters\n * values can only be of a limited set of types\n\n :param labels: a dictionary of labels\n :return: a new dictionary with sanitized keys/values\n \"\"\"\n new = {}\n for key, value in compat.iteritems(labels):\n if not isinstance(value, LABEL_TYPES):\n value = keyword_field(compat.text_type(value))\n new[LABEL_RE.sub(\"_\", compat.text_type(key))] = value\n return new\n", "path": "elasticapm/utils/encoding.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport datetime\nimport itertools\nimport uuid\nfrom decimal import Decimal\n\nfrom elasticapm.conf.constants import KEYWORD_MAX_LENGTH, LABEL_RE, LABEL_TYPES\nfrom elasticapm.utils import compat\n\nPROTECTED_TYPES = compat.integer_types + (type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time)\n\n\ndef is_protected_type(obj):\n \"\"\"Determine if the object instance is of a protected type.\n\n Objects of protected types are preserved as-is when passed to\n force_text(strings_only=True).\n \"\"\"\n return isinstance(obj, PROTECTED_TYPES)\n\n\ndef force_text(s, encoding=\"utf-8\", strings_only=False, errors=\"strict\"):\n \"\"\"\n Similar to smart_text, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n\n If strings_only is True, don't convert (some) non-string-like objects.\n \"\"\"\n # Handle the common case first, saves 30-40% when s is an instance of\n # compat.text_type. This function gets called often in that setting.\n #\n # Adapted from Django\n if isinstance(s, compat.text_type):\n return s\n if strings_only and is_protected_type(s):\n return s\n try:\n if not isinstance(s, compat.string_types):\n if hasattr(s, \"__unicode__\"):\n s = s.__unicode__()\n else:\n if compat.PY3:\n if isinstance(s, bytes):\n s = compat.text_type(s, encoding, errors)\n else:\n s = compat.text_type(s)\n else:\n s = compat.text_type(bytes(s), encoding, errors)\n else:\n # Note: We use .decode() here, instead of compat.text_type(s, encoding,\n # errors), so that if s is a SafeBytes, it ends up being a\n # SafeText at the end.\n s = s.decode(encoding, errors)\n except UnicodeDecodeError as e:\n if not isinstance(s, Exception):\n raise UnicodeDecodeError(*e.args)\n else:\n # If we get to here, the caller has passed in an Exception\n # subclass populated with non-ASCII bytestring data without a\n # working unicode method. 
Try to handle this without raising a\n # further exception by individually forcing the exception args\n # to unicode.\n s = \" \".join([force_text(arg, encoding, strings_only, errors) for arg in s])\n return s\n\n\ndef _has_elasticapm_metadata(value):\n try:\n return callable(value.__getattribute__(\"__elasticapm__\"))\n except Exception:\n return False\n\n\ndef transform(value, stack=None, context=None):\n # TODO: make this extendable\n if context is None:\n context = {}\n if stack is None:\n stack = []\n\n objid = id(value)\n if objid in context:\n return \"<...>\"\n\n context[objid] = 1\n transform_rec = lambda o: transform(o, stack + [value], context)\n\n if any(value is s for s in stack):\n ret = \"cycle\"\n elif isinstance(value, (tuple, list, set, frozenset)):\n try:\n ret = type(value)(transform_rec(o) for o in value)\n except Exception:\n # We may be dealing with a namedtuple\n class value_type(list):\n __name__ = type(value).__name__\n\n ret = value_type(transform_rec(o) for o in value)\n elif isinstance(value, uuid.UUID):\n try:\n ret = repr(value)\n except AttributeError:\n ret = None\n elif isinstance(value, dict):\n ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))\n elif isinstance(value, compat.text_type):\n ret = to_unicode(value)\n elif isinstance(value, compat.binary_type):\n ret = to_string(value)\n elif not isinstance(value, compat.class_types) and _has_elasticapm_metadata(value):\n ret = transform_rec(value.__elasticapm__())\n elif isinstance(value, bool):\n ret = bool(value)\n elif isinstance(value, float):\n ret = float(value)\n elif isinstance(value, int):\n ret = int(value)\n elif compat.PY2 and isinstance(value, long): # noqa F821\n ret = long(value) # noqa F821\n elif value is not None:\n try:\n ret = transform(repr(value))\n except Exception:\n # It's common case that a model's __unicode__ definition may try to query the database\n # which if it was not cleaned up correctly, would hit a transaction aborted exception\n ret = u\"<BadRepr: %s>\" % type(value)\n else:\n ret = None\n del context[objid]\n return ret\n\n\ndef to_unicode(value):\n try:\n value = compat.text_type(force_text(value))\n except (UnicodeEncodeError, UnicodeDecodeError):\n value = \"(Error decoding value)\"\n except Exception: # in some cases we get a different exception\n try:\n value = compat.binary_type(repr(type(value)))\n except Exception:\n value = \"(Error decoding value)\"\n return value\n\n\ndef to_string(value):\n try:\n return compat.binary_type(value.decode(\"utf-8\").encode(\"utf-8\"))\n except Exception:\n return to_unicode(value).encode(\"utf-8\")\n\n\ndef shorten(var, list_length=50, string_length=200, dict_length=50, **kwargs):\n \"\"\"\n Shorten a given variable based on configurable maximum lengths, leaving\n breadcrumbs in the object to show that it was shortened.\n\n For strings, truncate the string to the max length, and append \"...\" so\n the user knows data was lost.\n\n For lists, truncate the list to the max length, and append two new strings\n to the list: \"...\" and \"(<x> more elements)\" where <x> is the number of\n elements removed.\n\n For dicts, truncate the dict to the max length (based on number of key/value\n pairs) and add a new (key, value) pair to the dict:\n (\"...\", \"(<x> more elements)\") where <x> is the number of key/value pairs\n removed.\n\n :param var: Variable to be shortened\n :param list_length: Max length (in items) of lists\n :param string_length: Max length (in characters) of strings\n :param dict_length: 
Max length (in key/value pairs) of dicts\n :return: Shortened variable\n \"\"\"\n var = transform(var)\n if isinstance(var, compat.string_types) and len(var) > string_length:\n var = var[: string_length - 3] + \"...\"\n elif isinstance(var, (list, tuple, set, frozenset)) and len(var) > list_length:\n # TODO: we should write a real API for storing some metadata with vars when\n # we get around to doing ref storage\n var = list(var)[:list_length] + [\"...\", \"(%d more elements)\" % (len(var) - list_length,)]\n elif isinstance(var, dict) and len(var) > dict_length:\n trimmed_tuples = [(k, v) for (k, v) in itertools.islice(compat.iteritems(var), dict_length)]\n if \"<truncated>\" not in var:\n trimmed_tuples += [(\"<truncated>\", \"(%d more elements)\" % (len(var) - dict_length))]\n var = dict(trimmed_tuples)\n return var\n\n\ndef keyword_field(string):\n \"\"\"\n If the given string is longer than KEYWORD_MAX_LENGTH, truncate it to\n KEYWORD_MAX_LENGTH-1, adding the \"\u2026\" character at the end.\n \"\"\"\n if not isinstance(string, compat.string_types) or len(string) <= KEYWORD_MAX_LENGTH:\n return string\n return string[: KEYWORD_MAX_LENGTH - 1] + u\"\u2026\"\n\n\ndef enforce_label_format(labels):\n \"\"\"\n Enforces label format:\n * dots, double quotes or stars in keys are replaced by underscores\n * string values are limited to a length of 1024 characters\n * values can only be of a limited set of types\n\n :param labels: a dictionary of labels\n :return: a new dictionary with sanitized keys/values\n \"\"\"\n new = {}\n for key, value in compat.iteritems(labels):\n if not isinstance(value, LABEL_TYPES):\n value = keyword_field(compat.text_type(value))\n new[LABEL_RE.sub(\"_\", compat.text_type(key))] = value\n return new\n", "path": "elasticapm/utils/encoding.py"}]} | 3,753 | 149 |
gh_patches_debug_7094 | rasdani/github-patches | git_diff | sktime__sktime-735 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SettingWithCopyWarning in Prophet with exogenous data
**Describe the bug**
Running `Prophet` with `X` other than `None` throws the following warning
```python
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
X["ds"] = X.index
/Users/muhlbach/opt/anaconda3/envs/experimental/lib/python3.8/site-packages/sktime/forecasting/base/adapters/_fbprophet.py:190: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
```
Will you please change line 190 in _fbprophet.py to
```python
X.loc[:,"ds"]=X.index
```
Thanks!
--- END ISSUE ---
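As a rough, hedged reproduction of the trigger (data made up here): when the exogenous frame reaches `_merge_X` as a slice of a larger DataFrame, plain column assignment hits pandas' SettingWithCopyWarning machinery under the default (non copy-on-write) behaviour; whether the `.loc` form suggested in the issue silences it depends on the pandas version.

```python
import pandas as pd

base = pd.DataFrame(
    {"x0": range(5)},
    index=pd.date_range("2021-01-01", periods=5, freq="D"),
)
X = base[base["x0"] > 0]   # a filtered slice, marked internally as a potential copy

X["ds"] = X.index          # the pattern flagged in the issue -> SettingWithCopyWarning
X.loc[:, "ds"] = X.index   # the form the issue asks for
```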
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/forecasting/base/adapters/_fbprophet.py`
Content:
```
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4
5 __author__ = ["Markus Löning", "Martin Walter"]
6 __all__ = ["_ProphetAdapter"]
7
8 import os
9
10 import pandas as pd
11
12 from sktime.forecasting.base._base import DEFAULT_ALPHA
13 from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
14 from sktime.forecasting.base._sktime import _SktimeForecaster
15 from contextlib import contextmanager
16
17
18 class _ProphetAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
19 """Base class for interfacing fbprophet and neuralprophet"""
20
21 def fit(self, y, X=None, fh=None, **fit_params):
22 """Fit to training data.
23 Parameters
24 ----------
25 y : pd.Series
26 Target time series to which to fit the forecaster.
27 X : pd.DataFrame, optional (default=None)
28 Exogenous variables.
29 fh : int, list or np.array, optional (default=None)
30 The forecasters horizon with the steps ahead to to predict.
31 Returns
32 -------
33 self : returns an instance of self.
34 """
35 self._instantiate_model()
36 self._check_changepoints()
37 self._set_y_X(y, X, enforce_index_type=pd.DatetimeIndex)
38 self._set_fh(fh)
39
40 # We have to bring the data into the required format for fbprophet:
41 df = pd.DataFrame({"y": y, "ds": y.index})
42
43 # Add seasonality
44 if self.add_seasonality:
45 self._forecaster.add_seasonality(**self.add_seasonality)
46
47 # Add country holidays
48 if self.add_country_holidays:
49 self._forecaster.add_country_holidays(**self.add_country_holidays)
50
51 # Add regressor (multivariate)
52 if X is not None:
53 df, X = _merge_X(df, X)
54 for col in X.columns:
55 self._forecaster.add_regressor(col)
56
57 if self.verbose:
58 self._forecaster.fit(df=df, **fit_params)
59 else:
60 with _suppress_stdout_stderr():
61 self._forecaster.fit(df=df, **fit_params)
62
63 self._is_fitted = True
64 return self
65
66 def predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
67 """Predict
68
69 Parameters
70 ----------
71 fh : array-like
72 The forecasters horizon with the steps ahead to to predict.
73 Default is
74 one-step ahead forecast, i.e. np.array([1]).
75 X : pd.DataFrame, optional
76 Exogenous data, by default None
77 return_pred_int : bool, optional
78 Returns a pd.DataFrame with confidence intervalls, by default False
79 alpha : float, optional
80 Alpha level for confidence intervalls, by default DEFAULT_ALPHA
81
82 Returns
83 -------
84 y_pred : pandas.Series
85 Returns series of predicted values.
86
87 Raises
88 ------
89 Exception
90 Error when merging data
91 """
92 self.check_is_fitted()
93 self._set_fh(fh)
94 self._update_X(X, enforce_index_type=pd.DatetimeIndex)
95
96 fh = self.fh.to_absolute(cutoff=self.cutoff).to_pandas()
97 if not isinstance(fh, pd.DatetimeIndex):
98 raise ValueError("absolute `fh` must be represented as a pd.DatetimeIndex")
99 df = pd.DataFrame({"ds": fh}, index=fh)
100
101 # Merge X with df (of created future DatetimeIndex values)
102 if X is not None:
103 df, X = _merge_X(df, X)
104
105 # don't compute confidence intervals if not asked for
106 with self._return_pred_int(return_pred_int):
107 out = self._forecaster.predict(df)
108
109 out.set_index("ds", inplace=True)
110 y_pred = out.loc[:, "yhat"]
111
112 if return_pred_int:
113 pred_int = out.loc[:, ["yhat_lower", "yhat_upper"]]
114 pred_int.columns = pred_int.columns.str.strip("yhat_")
115 return y_pred, pred_int
116 else:
117 return y_pred
118
119 def get_fitted_params(self):
120 """Get fitted parameters
121
122 Returns
123 -------
124 fitted_params : dict
125
126 References
127 ----------
128 https://facebook.github.io/prophet/docs/additional_topics.html
129 """
130 self.check_is_fitted()
131 fitted_params = {}
132 for name in ["k", "m", "sigma_obs"]:
133 fitted_params[name] = self._forecaster.params[name][0][0]
134 for name in ["delta", "beta"]:
135 fitted_params[name] = self._forecaster.params[name][0]
136 return fitted_params
137
138 def _check_changepoints(self):
139 """Checking arguments for changepoints and assign related arguments
140
141 Returns
142 -------
143 self
144 """
145 if self.changepoints is not None:
146 self.changepoints = pd.Series(pd.to_datetime(self.changepoints), name="ds")
147 self.n_changepoints = len(self.changepoints)
148 self.specified_changepoints = True
149 else:
150 self.specified_changepoints = False
151 return self
152
153 @contextmanager
154 def _return_pred_int(self, return_pred_int):
155 if not return_pred_int:
156 # setting uncertainty samples to zero avoids computing pred ints
157 self._forecaster.uncertainty_samples = 0
158 try:
159 yield
160 finally:
161 if not return_pred_int:
162 self._forecaster.uncertainty_samples = self.uncertainty_samples
163
164
165 def _merge_X(df, X):
166 """Merge X and df on the DatetimeIndex
167
168 Parameters
169 ----------
170 fh : sktime.ForecastingHorizon
171 X : pd.DataFrame
172 Exog data
173 df : pd.DataFrame
174 Contains a DatetimeIndex column "ds"
175
176 Returns
177 -------
178 pd.DataFrame
179 DataFrame with containing X and df (with a DatetimeIndex column "ds")
180
181 Raises
182 ------
183 TypeError
184 Error if merging was not possible
185 """
186 # Merging on the index is unreliable, possibly due to loss of freq information in fh
187 X.columns = X.columns.astype(str)
188 if "ds" in X.columns:
189 raise ValueError("Column name 'ds' is reserved in fbprophet")
190 X["ds"] = X.index
191 # df = df.merge(X, how="inner", on="ds", copy=False)
192 df = df.merge(X, how="inner", on="ds")
193 return df, X.drop(columns="ds")
194
195
196 class _suppress_stdout_stderr(object):
197 """
198 A context manager for doing a "deep suppression" of stdout and stderr in
199 Python, i.e. will suppress all print, even if the print originates in a
200 compiled C/Fortran sub-function.
201 This will not suppress raised exceptions, since exceptions are printed
202 to stderr just before a script exits, and after the context manager has
203 exited (at least, I think that is why it lets exceptions through).
204
205 References
206 ----------
207 https://github.com/facebook/prophet/issues/223
208 """
209
210 def __init__(self):
211 # Open a pair of null files
212 self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
213 # Save the actual stdout (1) and stderr (2) file descriptors.
214 self.save_fds = [os.dup(1), os.dup(2)]
215
216 def __enter__(self):
217 # Assign the null pointers to stdout and stderr.
218 os.dup2(self.null_fds[0], 1)
219 os.dup2(self.null_fds[1], 2)
220
221 def __exit__(self, *_):
222 # Re-assign the real stdout/stderr back to (1) and (2)
223 os.dup2(self.save_fds[0], 1)
224 os.dup2(self.save_fds[1], 2)
225 # Close the null files
226 for fd in self.null_fds + self.save_fds:
227 os.close(fd)
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/forecasting/base/adapters/_fbprophet.py b/sktime/forecasting/base/adapters/_fbprophet.py
--- a/sktime/forecasting/base/adapters/_fbprophet.py
+++ b/sktime/forecasting/base/adapters/_fbprophet.py
@@ -187,7 +187,7 @@
X.columns = X.columns.astype(str)
if "ds" in X.columns:
raise ValueError("Column name 'ds' is reserved in fbprophet")
- X["ds"] = X.index
+ X.loc[:, "ds"] = X.index
# df = df.merge(X, how="inner", on="ds", copy=False)
df = df.merge(X, how="inner", on="ds")
return df, X.drop(columns="ds")
| {"golden_diff": "diff --git a/sktime/forecasting/base/adapters/_fbprophet.py b/sktime/forecasting/base/adapters/_fbprophet.py\n--- a/sktime/forecasting/base/adapters/_fbprophet.py\n+++ b/sktime/forecasting/base/adapters/_fbprophet.py\n@@ -187,7 +187,7 @@\n X.columns = X.columns.astype(str)\n if \"ds\" in X.columns:\n raise ValueError(\"Column name 'ds' is reserved in fbprophet\")\n- X[\"ds\"] = X.index\n+ X.loc[:, \"ds\"] = X.index\n # df = df.merge(X, how=\"inner\", on=\"ds\", copy=False)\n df = df.merge(X, how=\"inner\", on=\"ds\")\n return df, X.drop(columns=\"ds\")\n", "issue": "SettingWithCopyWarning in Prophet with exogenous data\n**Describe the bug**\r\nRunning `Prophet` with `X` different from `None` throws the following warning\r\n\r\n```python\r\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\r\n X[\"ds\"] = X.index\r\n/Users/muhlbach/opt/anaconda3/envs/experimental/lib/python3.8/site-packages/sktime/forecasting/base/adapters/_fbprophet.py:190: SettingWithCopyWarning: \r\nA value is trying to be set on a copy of a slice from a DataFrame.\r\nTry using .loc[row_indexer,col_indexer] = value instead\r\n```\r\n\r\nWill you please change line 190 in _fbprophet.py to\r\n```python\r\nX.loc[:,\"ds\"]=X.index\r\n```\r\n\r\nThanks!\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__author__ = [\"Markus L\u00f6ning\", \"Martin Walter\"]\n__all__ = [\"_ProphetAdapter\"]\n\nimport os\n\nimport pandas as pd\n\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin\nfrom sktime.forecasting.base._sktime import _SktimeForecaster\nfrom contextlib import contextmanager\n\n\nclass _ProphetAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):\n \"\"\"Base class for interfacing fbprophet and neuralprophet\"\"\"\n\n def fit(self, y, X=None, fh=None, **fit_params):\n \"\"\"Fit to training data.\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._instantiate_model()\n self._check_changepoints()\n self._set_y_X(y, X, enforce_index_type=pd.DatetimeIndex)\n self._set_fh(fh)\n\n # We have to bring the data into the required format for fbprophet:\n df = pd.DataFrame({\"y\": y, \"ds\": y.index})\n\n # Add seasonality\n if self.add_seasonality:\n self._forecaster.add_seasonality(**self.add_seasonality)\n\n # Add country holidays\n if self.add_country_holidays:\n self._forecaster.add_country_holidays(**self.add_country_holidays)\n\n # Add regressor (multivariate)\n if X is not None:\n df, X = _merge_X(df, X)\n for col in X.columns:\n self._forecaster.add_regressor(col)\n\n if self.verbose:\n self._forecaster.fit(df=df, **fit_params)\n else:\n with _suppress_stdout_stderr():\n self._forecaster.fit(df=df, **fit_params)\n\n self._is_fitted = True\n return self\n\n def predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):\n \"\"\"Predict\n\n Parameters\n ----------\n fh : array-like\n The forecasters horizon with the steps ahead to to predict.\n Default is\n one-step ahead forecast, i.e. 
np.array([1]).\n X : pd.DataFrame, optional\n Exogenous data, by default None\n return_pred_int : bool, optional\n Returns a pd.DataFrame with confidence intervalls, by default False\n alpha : float, optional\n Alpha level for confidence intervalls, by default DEFAULT_ALPHA\n\n Returns\n -------\n y_pred : pandas.Series\n Returns series of predicted values.\n\n Raises\n ------\n Exception\n Error when merging data\n \"\"\"\n self.check_is_fitted()\n self._set_fh(fh)\n self._update_X(X, enforce_index_type=pd.DatetimeIndex)\n\n fh = self.fh.to_absolute(cutoff=self.cutoff).to_pandas()\n if not isinstance(fh, pd.DatetimeIndex):\n raise ValueError(\"absolute `fh` must be represented as a pd.DatetimeIndex\")\n df = pd.DataFrame({\"ds\": fh}, index=fh)\n\n # Merge X with df (of created future DatetimeIndex values)\n if X is not None:\n df, X = _merge_X(df, X)\n\n # don't compute confidence intervals if not asked for\n with self._return_pred_int(return_pred_int):\n out = self._forecaster.predict(df)\n\n out.set_index(\"ds\", inplace=True)\n y_pred = out.loc[:, \"yhat\"]\n\n if return_pred_int:\n pred_int = out.loc[:, [\"yhat_lower\", \"yhat_upper\"]]\n pred_int.columns = pred_int.columns.str.strip(\"yhat_\")\n return y_pred, pred_int\n else:\n return y_pred\n\n def get_fitted_params(self):\n \"\"\"Get fitted parameters\n\n Returns\n -------\n fitted_params : dict\n\n References\n ----------\n https://facebook.github.io/prophet/docs/additional_topics.html\n \"\"\"\n self.check_is_fitted()\n fitted_params = {}\n for name in [\"k\", \"m\", \"sigma_obs\"]:\n fitted_params[name] = self._forecaster.params[name][0][0]\n for name in [\"delta\", \"beta\"]:\n fitted_params[name] = self._forecaster.params[name][0]\n return fitted_params\n\n def _check_changepoints(self):\n \"\"\"Checking arguments for changepoints and assign related arguments\n\n Returns\n -------\n self\n \"\"\"\n if self.changepoints is not None:\n self.changepoints = pd.Series(pd.to_datetime(self.changepoints), name=\"ds\")\n self.n_changepoints = len(self.changepoints)\n self.specified_changepoints = True\n else:\n self.specified_changepoints = False\n return self\n\n @contextmanager\n def _return_pred_int(self, return_pred_int):\n if not return_pred_int:\n # setting uncertainty samples to zero avoids computing pred ints\n self._forecaster.uncertainty_samples = 0\n try:\n yield\n finally:\n if not return_pred_int:\n self._forecaster.uncertainty_samples = self.uncertainty_samples\n\n\ndef _merge_X(df, X):\n \"\"\"Merge X and df on the DatetimeIndex\n\n Parameters\n ----------\n fh : sktime.ForecastingHorizon\n X : pd.DataFrame\n Exog data\n df : pd.DataFrame\n Contains a DatetimeIndex column \"ds\"\n\n Returns\n -------\n pd.DataFrame\n DataFrame with containing X and df (with a DatetimeIndex column \"ds\")\n\n Raises\n ------\n TypeError\n Error if merging was not possible\n \"\"\"\n # Merging on the index is unreliable, possibly due to loss of freq information in fh\n X.columns = X.columns.astype(str)\n if \"ds\" in X.columns:\n raise ValueError(\"Column name 'ds' is reserved in fbprophet\")\n X[\"ds\"] = X.index\n # df = df.merge(X, how=\"inner\", on=\"ds\", copy=False)\n df = df.merge(X, how=\"inner\", on=\"ds\")\n return df, X.drop(columns=\"ds\")\n\n\nclass _suppress_stdout_stderr(object):\n \"\"\"\n A context manager for doing a \"deep suppression\" of stdout and stderr in\n Python, i.e. 
will suppress all print, even if the print originates in a\n compiled C/Fortran sub-function.\n This will not suppress raised exceptions, since exceptions are printed\n to stderr just before a script exits, and after the context manager has\n exited (at least, I think that is why it lets exceptions through).\n\n References\n ----------\n https://github.com/facebook/prophet/issues/223\n \"\"\"\n\n def __init__(self):\n # Open a pair of null files\n self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]\n # Save the actual stdout (1) and stderr (2) file descriptors.\n self.save_fds = [os.dup(1), os.dup(2)]\n\n def __enter__(self):\n # Assign the null pointers to stdout and stderr.\n os.dup2(self.null_fds[0], 1)\n os.dup2(self.null_fds[1], 2)\n\n def __exit__(self, *_):\n # Re-assign the real stdout/stderr back to (1) and (2)\n os.dup2(self.save_fds[0], 1)\n os.dup2(self.save_fds[1], 2)\n # Close the null files\n for fd in self.null_fds + self.save_fds:\n os.close(fd)\n", "path": "sktime/forecasting/base/adapters/_fbprophet.py"}], "after_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__author__ = [\"Markus L\u00f6ning\", \"Martin Walter\"]\n__all__ = [\"_ProphetAdapter\"]\n\nimport os\n\nimport pandas as pd\n\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin\nfrom sktime.forecasting.base._sktime import _SktimeForecaster\nfrom contextlib import contextmanager\n\n\nclass _ProphetAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):\n \"\"\"Base class for interfacing fbprophet and neuralprophet\"\"\"\n\n def fit(self, y, X=None, fh=None, **fit_params):\n \"\"\"Fit to training data.\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._instantiate_model()\n self._check_changepoints()\n self._set_y_X(y, X, enforce_index_type=pd.DatetimeIndex)\n self._set_fh(fh)\n\n # We have to bring the data into the required format for fbprophet:\n df = pd.DataFrame({\"y\": y, \"ds\": y.index})\n\n # Add seasonality\n if self.add_seasonality:\n self._forecaster.add_seasonality(**self.add_seasonality)\n\n # Add country holidays\n if self.add_country_holidays:\n self._forecaster.add_country_holidays(**self.add_country_holidays)\n\n # Add regressor (multivariate)\n if X is not None:\n df, X = _merge_X(df, X)\n for col in X.columns:\n self._forecaster.add_regressor(col)\n\n if self.verbose:\n self._forecaster.fit(df=df, **fit_params)\n else:\n with _suppress_stdout_stderr():\n self._forecaster.fit(df=df, **fit_params)\n\n self._is_fitted = True\n return self\n\n def predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):\n \"\"\"Predict\n\n Parameters\n ----------\n fh : array-like\n The forecasters horizon with the steps ahead to to predict.\n Default is\n one-step ahead forecast, i.e. 
np.array([1]).\n X : pd.DataFrame, optional\n Exogenous data, by default None\n return_pred_int : bool, optional\n Returns a pd.DataFrame with confidence intervalls, by default False\n alpha : float, optional\n Alpha level for confidence intervalls, by default DEFAULT_ALPHA\n\n Returns\n -------\n y_pred : pandas.Series\n Returns series of predicted values.\n\n Raises\n ------\n Exception\n Error when merging data\n \"\"\"\n self.check_is_fitted()\n self._set_fh(fh)\n self._update_X(X, enforce_index_type=pd.DatetimeIndex)\n\n fh = self.fh.to_absolute(cutoff=self.cutoff).to_pandas()\n if not isinstance(fh, pd.DatetimeIndex):\n raise ValueError(\"absolute `fh` must be represented as a pd.DatetimeIndex\")\n df = pd.DataFrame({\"ds\": fh}, index=fh)\n\n # Merge X with df (of created future DatetimeIndex values)\n if X is not None:\n df, X = _merge_X(df, X)\n\n # don't compute confidence intervals if not asked for\n with self._return_pred_int(return_pred_int):\n out = self._forecaster.predict(df)\n\n out.set_index(\"ds\", inplace=True)\n y_pred = out.loc[:, \"yhat\"]\n\n if return_pred_int:\n pred_int = out.loc[:, [\"yhat_lower\", \"yhat_upper\"]]\n pred_int.columns = pred_int.columns.str.strip(\"yhat_\")\n return y_pred, pred_int\n else:\n return y_pred\n\n def get_fitted_params(self):\n \"\"\"Get fitted parameters\n\n Returns\n -------\n fitted_params : dict\n\n References\n ----------\n https://facebook.github.io/prophet/docs/additional_topics.html\n \"\"\"\n self.check_is_fitted()\n fitted_params = {}\n for name in [\"k\", \"m\", \"sigma_obs\"]:\n fitted_params[name] = self._forecaster.params[name][0][0]\n for name in [\"delta\", \"beta\"]:\n fitted_params[name] = self._forecaster.params[name][0]\n return fitted_params\n\n def _check_changepoints(self):\n \"\"\"Checking arguments for changepoints and assign related arguments\n\n Returns\n -------\n self\n \"\"\"\n if self.changepoints is not None:\n self.changepoints = pd.Series(pd.to_datetime(self.changepoints), name=\"ds\")\n self.n_changepoints = len(self.changepoints)\n self.specified_changepoints = True\n else:\n self.specified_changepoints = False\n return self\n\n @contextmanager\n def _return_pred_int(self, return_pred_int):\n if not return_pred_int:\n # setting uncertainty samples to zero avoids computing pred ints\n self._forecaster.uncertainty_samples = 0\n try:\n yield\n finally:\n if not return_pred_int:\n self._forecaster.uncertainty_samples = self.uncertainty_samples\n\n\ndef _merge_X(df, X):\n \"\"\"Merge X and df on the DatetimeIndex\n\n Parameters\n ----------\n fh : sktime.ForecastingHorizon\n X : pd.DataFrame\n Exog data\n df : pd.DataFrame\n Contains a DatetimeIndex column \"ds\"\n\n Returns\n -------\n pd.DataFrame\n DataFrame with containing X and df (with a DatetimeIndex column \"ds\")\n\n Raises\n ------\n TypeError\n Error if merging was not possible\n \"\"\"\n # Merging on the index is unreliable, possibly due to loss of freq information in fh\n X.columns = X.columns.astype(str)\n if \"ds\" in X.columns:\n raise ValueError(\"Column name 'ds' is reserved in fbprophet\")\n X.loc[:, \"ds\"] = X.index\n # df = df.merge(X, how=\"inner\", on=\"ds\", copy=False)\n df = df.merge(X, how=\"inner\", on=\"ds\")\n return df, X.drop(columns=\"ds\")\n\n\nclass _suppress_stdout_stderr(object):\n \"\"\"\n A context manager for doing a \"deep suppression\" of stdout and stderr in\n Python, i.e. 
will suppress all print, even if the print originates in a\n compiled C/Fortran sub-function.\n This will not suppress raised exceptions, since exceptions are printed\n to stderr just before a script exits, and after the context manager has\n exited (at least, I think that is why it lets exceptions through).\n\n References\n ----------\n https://github.com/facebook/prophet/issues/223\n \"\"\"\n\n def __init__(self):\n # Open a pair of null files\n self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]\n # Save the actual stdout (1) and stderr (2) file descriptors.\n self.save_fds = [os.dup(1), os.dup(2)]\n\n def __enter__(self):\n # Assign the null pointers to stdout and stderr.\n os.dup2(self.null_fds[0], 1)\n os.dup2(self.null_fds[1], 2)\n\n def __exit__(self, *_):\n # Re-assign the real stdout/stderr back to (1) and (2)\n os.dup2(self.save_fds[0], 1)\n os.dup2(self.save_fds[1], 2)\n # Close the null files\n for fd in self.null_fds + self.save_fds:\n os.close(fd)\n", "path": "sktime/forecasting/base/adapters/_fbprophet.py"}]} | 2,793 | 182 |
gh_patches_debug_9640 | rasdani/github-patches | git_diff | chainer__chainer-7760 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor utility link in optimizer_hooks unit tests
`chainer_tests/optimizer_hooks_tests` uses similar dummy links, which can be refactored to reduce repetition.
--- END ISSUE ---
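For illustration only, one shape such a shared helper could take — a sketch that assumes the hook tests mainly need a link exposing a single parameter with a preset gradient; the actual helper and its name in `chainer_tests` may differ:

```python
import numpy as np
import chainer


class SimpleParamLink(chainer.Link):
    """Hypothetical shared test helper: one parameter with a preset gradient."""

    def __init__(self, w, g):
        super(SimpleParamLink, self).__init__()
        with self.init_scope():
            self.param = chainer.Parameter(w)
        self.param.grad = g


# Usage sketch inside a hook test (values made up):
target = SimpleParamLink(
    np.arange(6, dtype=np.float32).reshape(2, 3),
    np.full((2, 3), 10.0, dtype=np.float32),
)
```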
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/optimizer_hooks/gradient_hard_clipping.py`
Content:
```
1 import chainer
2 from chainer import backend
3
4
5 class GradientHardClipping(object):
6
7 """Optimizer/UpdateRule hook function for gradient clipping.
8
9 This hook function clips all gradient arrays to be within a lower and upper
10 bound.
11
12 Args:
13 lower_bound (float): The lower bound of the gradient value.
14 upper_bound (float): The upper bound of the gradient value.
15
16 Attributes:
17 ~optimizer_hooks.GradientHardClipping.lower_bound (float): The
18 lower bound of the gradient value.
19 ~optimizer_hooks.GradientHardClipping.upper_bound (float): The
20 upper bound of the gradient value.
21 ~optimizer_hooks.GradientHardClipping.timing (string): Specifies
22 when this hook should be called by the
23 Optimizer/UpdateRule. Valid values are 'pre'
24 (before any updates) and 'post'
25 (after any updates).
26 ~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \
27 Specifies if this hook is called for each parameter
28 (``True``) or only once (``False``) by an optimizer to
29 which this hook is registered. This function does
30 not expect users to switch the value from default one,
31 which is `True`.
32
33 .. versionadded:: 4.0.0
34 The *timing* parameter.
35
36 """
37 name = 'GradientHardClipping'
38 call_for_each_param = True
39 timing = 'pre'
40
41 def __init__(self, lower_bound, upper_bound):
42 self.lower_bound = lower_bound
43 self.upper_bound = upper_bound
44
45 def __call__(self, rule, param):
46 grad = param.grad
47 if grad is None:
48 return
49 with chainer.using_device(param.device):
50 xp = param.device.xp
51 # TODO(kshitij12345): Fix when chainerx.clip
52 # supports kwarg `out`.
53 if xp == backend.chainerx \
54 or isinstance(param.grad, backend.intel64.mdarray):
55 grad[:] = grad.clip(self.lower_bound, self.upper_bound)
56 else:
57 # Save on new object allocation when using numpy and cupy
58 # using kwarg `out`
59 xp.clip(grad, self.lower_bound, self.upper_bound, out=grad)
60
```
--- END FILES ---
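One aside on the accompanying patch further down, which swaps `grad[:] = ...` for `grad[...] = ...`: basic slicing with `[:]` fails on zero-dimensional arrays, whereas `[...]` assigns in place for any rank, so the latter is the safer idiom for an in-place clip — presumably the motivation once the refactored tests cover scalar parameters, though the diff itself does not say. A quick standalone NumPy illustration:

```python
import numpy as np

scalar_grad = np.array(3.0)                           # zero-dimensional "gradient"

scalar_grad[...] = np.clip(scalar_grad, -1.0, 1.0)    # works for any rank
print(scalar_grad)                                    # 1.0

try:
    scalar_grad[:] = 0.0                              # basic slicing needs >= 1 dimension
except IndexError as exc:
    print("[:] failed:", exc)
```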
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/optimizer_hooks/gradient_hard_clipping.py b/chainer/optimizer_hooks/gradient_hard_clipping.py
--- a/chainer/optimizer_hooks/gradient_hard_clipping.py
+++ b/chainer/optimizer_hooks/gradient_hard_clipping.py
@@ -52,7 +52,7 @@
# supports kwarg `out`.
if xp == backend.chainerx \
or isinstance(param.grad, backend.intel64.mdarray):
- grad[:] = grad.clip(self.lower_bound, self.upper_bound)
+ grad[...] = grad.clip(self.lower_bound, self.upper_bound)
else:
# Save on new object allocation when using numpy and cupy
# using kwarg `out`
| {"golden_diff": "diff --git a/chainer/optimizer_hooks/gradient_hard_clipping.py b/chainer/optimizer_hooks/gradient_hard_clipping.py\n--- a/chainer/optimizer_hooks/gradient_hard_clipping.py\n+++ b/chainer/optimizer_hooks/gradient_hard_clipping.py\n@@ -52,7 +52,7 @@\n # supports kwarg `out`.\n if xp == backend.chainerx \\\n or isinstance(param.grad, backend.intel64.mdarray):\n- grad[:] = grad.clip(self.lower_bound, self.upper_bound)\n+ grad[...] = grad.clip(self.lower_bound, self.upper_bound)\n else:\n # Save on new object allocation when using numpy and cupy\n # using kwarg `out`\n", "issue": "Refactor utility link in optimizer_hooks unit tests\n`chainer_tests/optimizer_hooks_tests` uses similar dummy links, which can be refactored to reduce repetition.\n", "before_files": [{"content": "import chainer\nfrom chainer import backend\n\n\nclass GradientHardClipping(object):\n\n \"\"\"Optimizer/UpdateRule hook function for gradient clipping.\n\n This hook function clips all gradient arrays to be within a lower and upper\n bound.\n\n Args:\n lower_bound (float): The lower bound of the gradient value.\n upper_bound (float): The upper bound of the gradient value.\n\n Attributes:\n ~optimizer_hooks.GradientHardClipping.lower_bound (float): The\n lower bound of the gradient value.\n ~optimizer_hooks.GradientHardClipping.upper_bound (float): The\n upper bound of the gradient value.\n ~optimizer_hooks.GradientHardClipping.timing (string): Specifies\n when this hook should be called by the\n Optimizer/UpdateRule. Valid values are 'pre'\n (before any updates) and 'post'\n (after any updates).\n ~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \\\n Specifies if this hook is called for each parameter\n (``True``) or only once (``False``) by an optimizer to\n which this hook is registered. This function does\n not expect users to switch the value from default one,\n which is `True`.\n\n .. versionadded:: 4.0.0\n The *timing* parameter.\n\n \"\"\"\n name = 'GradientHardClipping'\n call_for_each_param = True\n timing = 'pre'\n\n def __init__(self, lower_bound, upper_bound):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n def __call__(self, rule, param):\n grad = param.grad\n if grad is None:\n return\n with chainer.using_device(param.device):\n xp = param.device.xp\n # TODO(kshitij12345): Fix when chainerx.clip\n # supports kwarg `out`.\n if xp == backend.chainerx \\\n or isinstance(param.grad, backend.intel64.mdarray):\n grad[:] = grad.clip(self.lower_bound, self.upper_bound)\n else:\n # Save on new object allocation when using numpy and cupy\n # using kwarg `out`\n xp.clip(grad, self.lower_bound, self.upper_bound, out=grad)\n", "path": "chainer/optimizer_hooks/gradient_hard_clipping.py"}], "after_files": [{"content": "import chainer\nfrom chainer import backend\n\n\nclass GradientHardClipping(object):\n\n \"\"\"Optimizer/UpdateRule hook function for gradient clipping.\n\n This hook function clips all gradient arrays to be within a lower and upper\n bound.\n\n Args:\n lower_bound (float): The lower bound of the gradient value.\n upper_bound (float): The upper bound of the gradient value.\n\n Attributes:\n ~optimizer_hooks.GradientHardClipping.lower_bound (float): The\n lower bound of the gradient value.\n ~optimizer_hooks.GradientHardClipping.upper_bound (float): The\n upper bound of the gradient value.\n ~optimizer_hooks.GradientHardClipping.timing (string): Specifies\n when this hook should be called by the\n Optimizer/UpdateRule. 
Valid values are 'pre'\n (before any updates) and 'post'\n (after any updates).\n ~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \\\n Specifies if this hook is called for each parameter\n (``True``) or only once (``False``) by an optimizer to\n which this hook is registered. This function does\n not expect users to switch the value from default one,\n which is `True`.\n\n .. versionadded:: 4.0.0\n The *timing* parameter.\n\n \"\"\"\n name = 'GradientHardClipping'\n call_for_each_param = True\n timing = 'pre'\n\n def __init__(self, lower_bound, upper_bound):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n def __call__(self, rule, param):\n grad = param.grad\n if grad is None:\n return\n with chainer.using_device(param.device):\n xp = param.device.xp\n # TODO(kshitij12345): Fix when chainerx.clip\n # supports kwarg `out`.\n if xp == backend.chainerx \\\n or isinstance(param.grad, backend.intel64.mdarray):\n grad[...] = grad.clip(self.lower_bound, self.upper_bound)\n else:\n # Save on new object allocation when using numpy and cupy\n # using kwarg `out`\n xp.clip(grad, self.lower_bound, self.upper_bound, out=grad)\n", "path": "chainer/optimizer_hooks/gradient_hard_clipping.py"}]} | 899 | 155 |
gh_patches_debug_7042 | rasdani/github-patches | git_diff | pytorch__vision-7449 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dataset MovingMNIST: `split=None` returns test dataset
### 🐛 Describe the bug
I've found a bug in the code for torchvision's MovingMNIST dataset, which causes only the test dataset to be returned when split=None. According to the documentation, when split is set to None, the entire dataset should be returned. However, this is not currently happening.
https://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L13-L19
I've tested this with the following code:
```python
from torchvision import datasets
import torch
dataset = datasets.MovingMNIST(root="data", download=True)
dataset[0].size() # returns torch.Size([10, 1, 64, 64]), but I expected torch.Size([20, 1, 64, 64])
```
I believe the bug is caused by lines 58-62 in the code, which handle None and test splits together:
https://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L42-L62
To fix this, I propose the following two changes:
- Separate the handling of None and test splits in the code.
- Only process lines 46-50 when split is not None.
Reference issue: #6981
I'm happy to help on this issue, please assign to me on this one.
### Versions
PyTorch version: 2.0.0
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 13.2.1 (arm64)
GCC version: Could not collect
Clang version: 14.0.0 (clang-1400.0.29.202)
CMake version: Could not collect
Libc version: N/A
Python version: 3.10.9 | packaged by conda-forge | (main, Feb 2 2023, 20:26:08) [Clang 14.0.6 ] (64-bit runtime)
Python platform: macOS-13.2.1-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Apple M1 Pro
Versions of relevant libraries:
[pip3] mypy-extensions==1.0.0
[pip3] numpy==1.24.2
[pip3] torch==2.0.0
[pip3] torch-tb-profiler==0.4.1
[pip3] torchvision==0.15.1
[conda] numpy 1.24.2 py310h3d2048e_0 conda-forge
[conda] pytorch 2.0.0 py3.10_0 pytorch
[conda] torch 2.0.0 pypi_0 pypi
[conda] torch-tb-profiler 0.4.1 pypi_0 pypi
[conda] torchvision 0.15.1 pypi_0 pypi
cc @pmeier
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/moving_mnist.py`
Content:
```
1 import os.path
2 from typing import Callable, Optional
3
4 import numpy as np
5 import torch
6 from torchvision.datasets.utils import download_url, verify_str_arg
7 from torchvision.datasets.vision import VisionDataset
8
9
10 class MovingMNIST(VisionDataset):
11 """`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.
12
13 Args:
14 root (string): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.
15 split (string, optional): The dataset split, supports ``None`` (default), ``"train"`` and ``"test"``.
16 If ``split=None``, the full data is returned.
17 split_ratio (int, optional): The split ratio of number of frames. If ``split="train"``, the first split
18 frames ``data[:, :split_ratio]`` is returned. If ``split="test"``, the last split frames ``data[:, split_ratio:]``
19 is returned. If ``split=None``, this parameter is ignored and the all frames data is returned.
20 transform (callable, optional): A function/transform that takes in an torch Tensor
21 and returns a transformed version. E.g, ``transforms.RandomCrop``
22 download (bool, optional): If true, downloads the dataset from the internet and
23 puts it in root directory. If dataset is already downloaded, it is not
24 downloaded again.
25 """
26
27 _URL = "http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy"
28
29 def __init__(
30 self,
31 root: str,
32 split: Optional[str] = None,
33 split_ratio: int = 10,
34 download: bool = False,
35 transform: Optional[Callable] = None,
36 ) -> None:
37 super().__init__(root, transform=transform)
38
39 self._base_folder = os.path.join(self.root, self.__class__.__name__)
40 self._filename = self._URL.split("/")[-1]
41
42 if split is not None:
43 verify_str_arg(split, "split", ("train", "test"))
44 self.split = split
45
46 if not isinstance(split_ratio, int):
47 raise TypeError(f"`split_ratio` should be an integer, but got {type(split_ratio)}")
48 elif not (1 <= split_ratio <= 19):
49 raise ValueError(f"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.")
50 self.split_ratio = split_ratio
51
52 if download:
53 self.download()
54
55 if not self._check_exists():
56 raise RuntimeError("Dataset not found. You can use download=True to download it.")
57
58 data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
59 if self.split == "train":
60 data = data[: self.split_ratio]
61 else:
62 data = data[self.split_ratio :]
63 self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
64
65 def __getitem__(self, idx: int) -> torch.Tensor:
66 """
67 Args:
68 index (int): Index
69 Returns:
70 torch.Tensor: Video frames (torch Tensor[T, C, H, W]). The `T` is the number of frames.
71 """
72 data = self.data[idx]
73 if self.transform is not None:
74 data = self.transform(data)
75
76 return data
77
78 def __len__(self) -> int:
79 return len(self.data)
80
81 def _check_exists(self) -> bool:
82 return os.path.exists(os.path.join(self._base_folder, self._filename))
83
84 def download(self) -> None:
85 if self._check_exists():
86 return
87
88 download_url(
89 url=self._URL,
90 root=self._base_folder,
91 filename=self._filename,
92 md5="be083ec986bfe91a449d63653c411eb2",
93 )
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/datasets/moving_mnist.py b/torchvision/datasets/moving_mnist.py
--- a/torchvision/datasets/moving_mnist.py
+++ b/torchvision/datasets/moving_mnist.py
@@ -58,7 +58,7 @@
data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
if self.split == "train":
data = data[: self.split_ratio]
- else:
+ elif self.split == "test":
data = data[self.split_ratio :]
self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
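A short, self-contained sketch of the patched split handling, with a random tensor standing in for `mnist_test_seq.npy`. The helper name and data are hypothetical; the expected shapes follow the issue report:

```python
# Random data stands in for the real (20, N, 64, 64) mnist_test_seq.npy array.
import torch

def take_split(data, split, split_ratio=10):
    # data: (num_frames=20, num_videos, 64, 64)
    if split == "train":
        data = data[:split_ratio]
    elif split == "test":        # was a bare `else`, which also caught split=None
        data = data[split_ratio:]
    return data.transpose(0, 1).unsqueeze(2).contiguous()

fake = torch.rand(20, 100, 64, 64)
print(take_split(fake, None)[0].shape)     # torch.Size([20, 1, 64, 64]) -- full clip
print(take_split(fake, "train")[0].shape)  # torch.Size([10, 1, 64, 64])
print(take_split(fake, "test")[0].shape)   # torch.Size([10, 1, 64, 64])
```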
| {"golden_diff": "diff --git a/torchvision/datasets/moving_mnist.py b/torchvision/datasets/moving_mnist.py\n--- a/torchvision/datasets/moving_mnist.py\n+++ b/torchvision/datasets/moving_mnist.py\n@@ -58,7 +58,7 @@\n data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))\n if self.split == \"train\":\n data = data[: self.split_ratio]\n- else:\n+ elif self.split == \"test\":\n data = data[self.split_ratio :]\n self.data = data.transpose(0, 1).unsqueeze(2).contiguous()\n", "issue": "Dataset MovingMNIST: `split=None` returns test dataset\n### \ud83d\udc1b Describe the bug\n\nI've found a bug in the code for torchvision's MovingMNIST dataset, which causes only the test dataset to be returned when split=None. According to the documentation, when split is set to None, the entire dataset should be returned. However, this is not currently happening.\r\nhttps://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L13-L19\r\n\r\nI've tested this with the following code:\r\n```python\r\nfrom torchvision import datasets\r\nimport torch\r\n\r\ndataset = datasets.MovingMNIST(root=\"data\", download=True)\r\ndataset[0].size() # returns torch.Size([10, 1, 64, 64]), but I expected torch.Size([20, 1, 64, 64])\r\n```\r\n\r\nI believe the bug is caused by lines 58-62 in the code, which handle None and test splits together:\r\nhttps://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L42-L62\r\n\r\nTo fix this, I propose the following two changes:\r\n- Separate the handling of None and test splits in the code.\r\n- Only process lines 46-50 when split is not None.\r\n\r\nReference issue: #6981 \r\n\r\nI'm happy to help on this issue, please assign to me on this one.\n\n### Versions\n\nPyTorch version: 2.0.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: None\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: macOS 13.2.1 (arm64)\r\nGCC version: Could not collect\r\nClang version: 14.0.0 (clang-1400.0.29.202)\r\nCMake version: Could not collect\r\nLibc version: N/A\r\n\r\nPython version: 3.10.9 | packaged by conda-forge | (main, Feb 2 2023, 20:26:08) [Clang 14.0.6 ] (64-bit runtime)\r\nPython platform: macOS-13.2.1-arm64-arm-64bit\r\nIs CUDA available: False\r\nCUDA runtime version: No CUDA\r\nCUDA_MODULE_LOADING set to: N/A\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nCPU:\r\nApple M1 Pro\r\n\r\nVersions of relevant libraries:\r\n[pip3] mypy-extensions==1.0.0\r\n[pip3] numpy==1.24.2\r\n[pip3] torch==2.0.0\r\n[pip3] torch-tb-profiler==0.4.1\r\n[pip3] torchvision==0.15.1\r\n[conda] numpy 1.24.2 py310h3d2048e_0 conda-forge\r\n[conda] pytorch 2.0.0 py3.10_0 pytorch\r\n[conda] torch 2.0.0 pypi_0 pypi\r\n[conda] torch-tb-profiler 0.4.1 pypi_0 pypi\r\n[conda] torchvision 0.15.1 pypi_0 pypi\n\ncc @pmeier\nDataset MovingMNIST: `split=None` returns test dataset\n### \ud83d\udc1b Describe the bug\n\nI've found a bug in the code for torchvision's MovingMNIST dataset, which causes only the test dataset to be returned when split=None. According to the documentation, when split is set to None, the entire dataset should be returned. 
However, this is not currently happening.\r\nhttps://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L13-L19\r\n\r\nI've tested this with the following code:\r\n```python\r\nfrom torchvision import datasets\r\nimport torch\r\n\r\ndataset = datasets.MovingMNIST(root=\"data\", download=True)\r\ndataset[0].size() # returns torch.Size([10, 1, 64, 64]), but I expected torch.Size([20, 1, 64, 64])\r\n```\r\n\r\nI believe the bug is caused by lines 58-62 in the code, which handle None and test splits together:\r\nhttps://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L42-L62\r\n\r\nTo fix this, I propose the following two changes:\r\n- Separate the handling of None and test splits in the code.\r\n- Only process lines 46-50 when split is not None.\r\n\r\nReference issue: #6981 \r\n\r\nI'm happy to help on this issue, please assign to me on this one.\n\n### Versions\n\nPyTorch version: 2.0.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: None\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: macOS 13.2.1 (arm64)\r\nGCC version: Could not collect\r\nClang version: 14.0.0 (clang-1400.0.29.202)\r\nCMake version: Could not collect\r\nLibc version: N/A\r\n\r\nPython version: 3.10.9 | packaged by conda-forge | (main, Feb 2 2023, 20:26:08) [Clang 14.0.6 ] (64-bit runtime)\r\nPython platform: macOS-13.2.1-arm64-arm-64bit\r\nIs CUDA available: False\r\nCUDA runtime version: No CUDA\r\nCUDA_MODULE_LOADING set to: N/A\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nCPU:\r\nApple M1 Pro\r\n\r\nVersions of relevant libraries:\r\n[pip3] mypy-extensions==1.0.0\r\n[pip3] numpy==1.24.2\r\n[pip3] torch==2.0.0\r\n[pip3] torch-tb-profiler==0.4.1\r\n[pip3] torchvision==0.15.1\r\n[conda] numpy 1.24.2 py310h3d2048e_0 conda-forge\r\n[conda] pytorch 2.0.0 py3.10_0 pytorch\r\n[conda] torch 2.0.0 pypi_0 pypi\r\n[conda] torch-tb-profiler 0.4.1 pypi_0 pypi\r\n[conda] torchvision 0.15.1 pypi_0 pypi\n\ncc @pmeier\n", "before_files": [{"content": "import os.path\nfrom typing import Callable, Optional\n\nimport numpy as np\nimport torch\nfrom torchvision.datasets.utils import download_url, verify_str_arg\nfrom torchvision.datasets.vision import VisionDataset\n\n\nclass MovingMNIST(VisionDataset):\n \"\"\"`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.\n split (string, optional): The dataset split, supports ``None`` (default), ``\"train\"`` and ``\"test\"``.\n If ``split=None``, the full data is returned.\n split_ratio (int, optional): The split ratio of number of frames. If ``split=\"train\"``, the first split\n frames ``data[:, :split_ratio]`` is returned. If ``split=\"test\"``, the last split frames ``data[:, split_ratio:]``\n is returned. If ``split=None``, this parameter is ignored and the all frames data is returned.\n transform (callable, optional): A function/transform that takes in an torch Tensor\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. 
If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n\n _URL = \"http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy\"\n\n def __init__(\n self,\n root: str,\n split: Optional[str] = None,\n split_ratio: int = 10,\n download: bool = False,\n transform: Optional[Callable] = None,\n ) -> None:\n super().__init__(root, transform=transform)\n\n self._base_folder = os.path.join(self.root, self.__class__.__name__)\n self._filename = self._URL.split(\"/\")[-1]\n\n if split is not None:\n verify_str_arg(split, \"split\", (\"train\", \"test\"))\n self.split = split\n\n if not isinstance(split_ratio, int):\n raise TypeError(f\"`split_ratio` should be an integer, but got {type(split_ratio)}\")\n elif not (1 <= split_ratio <= 19):\n raise ValueError(f\"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.\")\n self.split_ratio = split_ratio\n\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError(\"Dataset not found. You can use download=True to download it.\")\n\n data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))\n if self.split == \"train\":\n data = data[: self.split_ratio]\n else:\n data = data[self.split_ratio :]\n self.data = data.transpose(0, 1).unsqueeze(2).contiguous()\n\n def __getitem__(self, idx: int) -> torch.Tensor:\n \"\"\"\n Args:\n index (int): Index\n Returns:\n torch.Tensor: Video frames (torch Tensor[T, C, H, W]). The `T` is the number of frames.\n \"\"\"\n data = self.data[idx]\n if self.transform is not None:\n data = self.transform(data)\n\n return data\n\n def __len__(self) -> int:\n return len(self.data)\n\n def _check_exists(self) -> bool:\n return os.path.exists(os.path.join(self._base_folder, self._filename))\n\n def download(self) -> None:\n if self._check_exists():\n return\n\n download_url(\n url=self._URL,\n root=self._base_folder,\n filename=self._filename,\n md5=\"be083ec986bfe91a449d63653c411eb2\",\n )\n", "path": "torchvision/datasets/moving_mnist.py"}], "after_files": [{"content": "import os.path\nfrom typing import Callable, Optional\n\nimport numpy as np\nimport torch\nfrom torchvision.datasets.utils import download_url, verify_str_arg\nfrom torchvision.datasets.vision import VisionDataset\n\n\nclass MovingMNIST(VisionDataset):\n \"\"\"`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.\n split (string, optional): The dataset split, supports ``None`` (default), ``\"train\"`` and ``\"test\"``.\n If ``split=None``, the full data is returned.\n split_ratio (int, optional): The split ratio of number of frames. If ``split=\"train\"``, the first split\n frames ``data[:, :split_ratio]`` is returned. If ``split=\"test\"``, the last split frames ``data[:, split_ratio:]``\n is returned. If ``split=None``, this parameter is ignored and the all frames data is returned.\n transform (callable, optional): A function/transform that takes in an torch Tensor\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. 
If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n\n _URL = \"http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy\"\n\n def __init__(\n self,\n root: str,\n split: Optional[str] = None,\n split_ratio: int = 10,\n download: bool = False,\n transform: Optional[Callable] = None,\n ) -> None:\n super().__init__(root, transform=transform)\n\n self._base_folder = os.path.join(self.root, self.__class__.__name__)\n self._filename = self._URL.split(\"/\")[-1]\n\n if split is not None:\n verify_str_arg(split, \"split\", (\"train\", \"test\"))\n self.split = split\n\n if not isinstance(split_ratio, int):\n raise TypeError(f\"`split_ratio` should be an integer, but got {type(split_ratio)}\")\n elif not (1 <= split_ratio <= 19):\n raise ValueError(f\"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.\")\n self.split_ratio = split_ratio\n\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError(\"Dataset not found. You can use download=True to download it.\")\n\n data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))\n if self.split == \"train\":\n data = data[: self.split_ratio]\n elif self.split == \"test\":\n data = data[self.split_ratio :]\n self.data = data.transpose(0, 1).unsqueeze(2).contiguous()\n\n def __getitem__(self, idx: int) -> torch.Tensor:\n \"\"\"\n Args:\n index (int): Index\n Returns:\n torch.Tensor: Video frames (torch Tensor[T, C, H, W]). The `T` is the number of frames.\n \"\"\"\n data = self.data[idx]\n if self.transform is not None:\n data = self.transform(data)\n\n return data\n\n def __len__(self) -> int:\n return len(self.data)\n\n def _check_exists(self) -> bool:\n return os.path.exists(os.path.join(self._base_folder, self._filename))\n\n def download(self) -> None:\n if self._check_exists():\n return\n\n download_url(\n url=self._URL,\n root=self._base_folder,\n filename=self._filename,\n md5=\"be083ec986bfe91a449d63653c411eb2\",\n )\n", "path": "torchvision/datasets/moving_mnist.py"}]} | 2,964 | 141 |
gh_patches_debug_41042 | rasdani/github-patches | git_diff | conda__conda-2915 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Regression] Conda create environment fails on lock if root environment is not under user control
This issue is introduced in Conda 4.1.0 (Conda 4.0.8 works fine).
```
$ conda create -n root2 python=2 [123/1811]
Fetching package metadata .......
Solving package specifications .............
Package plan for installation in environment /home/frol/.conda/envs/root2:
The following NEW packages will be INSTALLED:
openssl: 1.0.2h-1 (soft-link)
pip: 8.1.2-py27_0 (soft-link)
python: 2.7.11-0 (soft-link)
readline: 6.2-2 (soft-link)
setuptools: 23.0.0-py27_0 (soft-link)
sqlite: 3.13.0-0 (soft-link)
tk: 8.5.18-0 (soft-link)
wheel: 0.29.0-py27_0 (soft-link)
zlib: 1.2.8-3 (soft-link)
Proceed ([y]/n)?
Linking packages ...
An unexpected error has occurred, please consider sending the
following traceback to the conda GitHub issue tracker at:
https://github.com/conda/conda/issues
Include the output of the command 'conda info' in your report.
Traceback (most recent call last):
File "/usr/local/miniconda/bin/conda", line 6, in <module>
sys.exit(main())
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main.py", line 120, in main
args_func(args, p)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main.py", line 127, in args_func
args.func(args, p)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main_create.py", line 57, in execute
install(args, parser, 'create')
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/install.py", line 407, in install
execute_actions(actions, index, verbose=not args.quiet)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/plan.py", line 566, in execute_actions
inst.execute_instructions(plan, index, verbose)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/instructions.py", line 137, in execute_instructions
cmd(state, arg)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/instructions.py", line 80, in LINK_CMD
link(state['prefix'], dist, lt, index=state['index'], shortcuts=shortcuts)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/install.py", line 1035, in link
with Locked(prefix), Locked(pkgs_dir):
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/lock.py", line 60, in __enter__
os.makedirs(self.lock_path)
File "/usr/local/miniconda/lib/python2.7/os.py", line 157, in makedirs
mkdir(name, mode)
OSError: [Errno 13] Permission denied: '/usr/local/miniconda/pkgs/.conda_lock-949'
```
`/usr/local/miniconda/` is a system-wide installation of miniconda, so obviously, users cannot create lock files there.
P.S. I have a dream that updating conda software won't break things on every release...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/lock.py`
Content:
```
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6
7 """
8 Tools for working with locks
9
10 A lock is just an empty directory. We use directories because this lets us use
11 the race condition-proof os.makedirs.
12
13 For now, there is one global lock for all of conda, because some things happen
14 globally (such as downloading packages).
15
16 We don't raise an error if the lock is named with the current PID
17 """
18 from __future__ import absolute_import, division, print_function
19
20 import logging
21 import os
22 import time
23
24 from .exceptions import LockError
25
26 LOCKFN = '.conda_lock'
27
28
29 stdoutlog = logging.getLogger('stdoutlog')
30
31
32 class Locked(object):
33 """
34 Context manager to handle locks.
35 """
36 def __init__(self, path, retries=10):
37 self.path = path
38 self.end = "-" + str(os.getpid())
39 self.lock_path = os.path.join(self.path, LOCKFN + self.end)
40 self.retries = retries
41
42 def __enter__(self):
43 # Keep the string "LOCKERROR" in this string so that external
44 # programs can look for it.
45 lockstr = ("""\
46 LOCKERROR: It looks like conda is already doing something.
47 The lock %s was found. Wait for it to finish before continuing.
48 If you are sure that conda is not running, remove it and try again.
49 You can also use: $ conda clean --lock\n""")
50 sleeptime = 1
51
52 for _ in range(self.retries):
53 if os.path.isdir(self.lock_path):
54 stdoutlog.info(lockstr % self.lock_path)
55 stdoutlog.info("Sleeping for %s seconds\n" % sleeptime)
56
57 time.sleep(sleeptime)
58 sleeptime *= 2
59 else:
60 os.makedirs(self.lock_path)
61 return self
62
63 stdoutlog.error("Exceeded max retries, giving up")
64 raise LockError(lockstr % self.lock_path)
65
66 def __exit__(self, exc_type, exc_value, traceback):
67 try:
68 os.rmdir(self.lock_path)
69 os.rmdir(self.path)
70 except OSError:
71 pass
72
```
Path: `conda/exceptions.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 class CondaException(Exception):
4 pass
5
6
7 class InvalidInstruction(CondaException):
8 def __init__(self, instruction, *args, **kwargs):
9 msg = "No handler for instruction: %r" % instruction
10 super(InvalidInstruction, self).__init__(msg, *args, **kwargs)
11
12 class LockError(RuntimeError, CondaException):
13 pass
14
```
Path: `conda/egg_info.py`
Content:
```
1 """
2 Functions related to core conda functionality that relates to manually
3 installed Python packages, e.g. using "python setup.py install", or "pip".
4 """
5 from __future__ import absolute_import, division, print_function
6
7 from io import open
8 import os
9 import re
10 import sys
11 from os.path import isdir, isfile, join
12
13 from .compat import itervalues
14 from .install import linked_data
15 from .misc import rel_path
16
17
18 def get_site_packages_dir(installed_pkgs):
19 for info in itervalues(installed_pkgs):
20 if info['name'] == 'python':
21 if sys.platform == 'win32':
22 stdlib_dir = 'Lib'
23 else:
24 py_ver = info['version'][:3]
25 stdlib_dir = 'lib/python%s' % py_ver
26 return join(stdlib_dir, 'site-packages')
27 return None
28
29
30 def get_egg_info_files(sp_dir):
31 for fn in os.listdir(sp_dir):
32 if not fn.endswith(('.egg', '.egg-info', '.dist-info')):
33 continue
34 path = join(sp_dir, fn)
35 if isfile(path):
36 yield path
37 elif isdir(path):
38 for path2 in [join(path, 'PKG-INFO'),
39 join(path, 'EGG-INFO', 'PKG-INFO'),
40 join(path, 'METADATA')]:
41 if isfile(path2):
42 yield path2
43
44
45 pat = re.compile(r'(\w+):\s*(\S+)', re.I)
46 def parse_egg_info(path):
47 """
48 Parse an .egg-info file and return its canonical distribution name
49 """
50 info = {}
51 for line in open(path, encoding='utf-8'):
52 line = line.strip()
53 m = pat.match(line)
54 if m:
55 key = m.group(1).lower()
56 info[key] = m.group(2)
57 try:
58 return '%(name)s-%(version)s-<pip>' % info
59 except KeyError:
60 pass
61 return None
62
63
64 def get_egg_info(prefix, all_pkgs=False):
65 """
66 Return a set of canonical names of all Python packages (in `prefix`),
67 by inspecting the .egg-info files inside site-packages.
68 By default, only untracked (not conda installed) .egg-info files are
69 considered. Setting `all_pkgs` to True changes this.
70 """
71 installed_pkgs = linked_data(prefix)
72 sp_dir = get_site_packages_dir(installed_pkgs)
73 if sp_dir is None:
74 return set()
75
76 conda_files = set()
77 for info in itervalues(installed_pkgs):
78 conda_files.update(info.get('files', []))
79
80 res = set()
81 for path in get_egg_info_files(join(prefix, sp_dir)):
82 f = rel_path(prefix, path)
83 if all_pkgs or f not in conda_files:
84 try:
85 dist = parse_egg_info(path)
86 except UnicodeDecodeError:
87 dist = None
88 if dist:
89 res.add(dist)
90 return res
91
92
93 if __name__ == '__main__':
94 from pprint import pprint
95 pprint(get_egg_info(sys.prefix))
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/egg_info.py b/conda/egg_info.py
--- a/conda/egg_info.py
+++ b/conda/egg_info.py
@@ -15,6 +15,7 @@
from .misc import rel_path
+
def get_site_packages_dir(installed_pkgs):
for info in itervalues(installed_pkgs):
if info['name'] == 'python':
diff --git a/conda/exceptions.py b/conda/exceptions.py
--- a/conda/exceptions.py
+++ b/conda/exceptions.py
@@ -8,6 +8,3 @@
def __init__(self, instruction, *args, **kwargs):
msg = "No handler for instruction: %r" % instruction
super(InvalidInstruction, self).__init__(msg, *args, **kwargs)
-
-class LockError(RuntimeError, CondaException):
- pass
diff --git a/conda/lock.py b/conda/lock.py
--- a/conda/lock.py
+++ b/conda/lock.py
@@ -17,11 +17,11 @@
"""
from __future__ import absolute_import, division, print_function
-import logging
import os
-import time
-
-from .exceptions import LockError
+import logging
+from os.path import join
+import glob
+from time import sleep
LOCKFN = '.conda_lock'
@@ -33,13 +33,15 @@
"""
Context manager to handle locks.
"""
- def __init__(self, path, retries=10):
+ def __init__(self, path):
self.path = path
self.end = "-" + str(os.getpid())
- self.lock_path = os.path.join(self.path, LOCKFN + self.end)
- self.retries = retries
+ self.lock_path = join(self.path, LOCKFN + self.end)
+ self.pattern = join(self.path, LOCKFN + '-*')
+ self.remove = True
def __enter__(self):
+ retries = 10
# Keep the string "LOCKERROR" in this string so that external
# programs can look for it.
lockstr = ("""\
@@ -48,24 +50,33 @@
If you are sure that conda is not running, remove it and try again.
You can also use: $ conda clean --lock\n""")
sleeptime = 1
-
- for _ in range(self.retries):
- if os.path.isdir(self.lock_path):
- stdoutlog.info(lockstr % self.lock_path)
+ files = None
+ while retries:
+ files = glob.glob(self.pattern)
+ if files and not files[0].endswith(self.end):
+ stdoutlog.info(lockstr % str(files))
stdoutlog.info("Sleeping for %s seconds\n" % sleeptime)
-
- time.sleep(sleeptime)
+ sleep(sleeptime)
sleeptime *= 2
+ retries -= 1
else:
- os.makedirs(self.lock_path)
- return self
+ break
+ else:
+ stdoutlog.error("Exceeded max retries, giving up")
+ raise RuntimeError(lockstr % str(files))
- stdoutlog.error("Exceeded max retries, giving up")
- raise LockError(lockstr % self.lock_path)
+ if not files:
+ try:
+ os.makedirs(self.lock_path)
+ except OSError:
+ pass
+ else: # PID lock already here --- someone else will remove it.
+ self.remove = False
def __exit__(self, exc_type, exc_value, traceback):
- try:
- os.rmdir(self.lock_path)
- os.rmdir(self.path)
- except OSError:
- pass
+ if self.remove:
+ for path in self.lock_path, self.path:
+ try:
+ os.rmdir(path)
+ except OSError:
+ pass
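The core of this lock fix is that creating the lock marker becomes best-effort, so a permission error on a system-wide `pkgs` directory no longer aborts the command. A minimal standalone sketch of that pattern (hypothetical helper, not conda's real API):

```python
# Best-effort creation of the lock marker directory: a read-only package
# cache (OSError / Errno 13) no longer raises, the caller just proceeds
# without owning a marker to clean up.
import os

def make_lock_dir(lock_path):
    try:
        os.makedirs(lock_path)
        return True       # created it, so this process should remove it later
    except OSError:
        return False      # e.g. permission denied on a system-wide install
```

The patch pairs this with a `glob` check for other processes' `.conda_lock-*` markers, so a lock left by another PID still blocks (with retries), while a lock the current process cannot create is simply skipped.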
| {"golden_diff": "diff --git a/conda/egg_info.py b/conda/egg_info.py\n--- a/conda/egg_info.py\n+++ b/conda/egg_info.py\n@@ -15,6 +15,7 @@\n from .misc import rel_path\n \n \n+\n def get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\ndiff --git a/conda/exceptions.py b/conda/exceptions.py\n--- a/conda/exceptions.py\n+++ b/conda/exceptions.py\n@@ -8,6 +8,3 @@\n def __init__(self, instruction, *args, **kwargs):\n msg = \"No handler for instruction: %r\" % instruction\n super(InvalidInstruction, self).__init__(msg, *args, **kwargs)\n-\n-class LockError(RuntimeError, CondaException):\n- pass\ndiff --git a/conda/lock.py b/conda/lock.py\n--- a/conda/lock.py\n+++ b/conda/lock.py\n@@ -17,11 +17,11 @@\n \"\"\"\n from __future__ import absolute_import, division, print_function\n \n-import logging\n import os\n-import time\n-\n-from .exceptions import LockError\n+import logging\n+from os.path import join\n+import glob\n+from time import sleep\n \n LOCKFN = '.conda_lock'\n \n@@ -33,13 +33,15 @@\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n- def __init__(self, path, retries=10):\n+ def __init__(self, path):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n- self.lock_path = os.path.join(self.path, LOCKFN + self.end)\n- self.retries = retries\n+ self.lock_path = join(self.path, LOCKFN + self.end)\n+ self.pattern = join(self.path, LOCKFN + '-*')\n+ self.remove = True\n \n def __enter__(self):\n+ retries = 10\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n lockstr = (\"\"\"\\\n@@ -48,24 +50,33 @@\n If you are sure that conda is not running, remove it and try again.\n You can also use: $ conda clean --lock\\n\"\"\")\n sleeptime = 1\n-\n- for _ in range(self.retries):\n- if os.path.isdir(self.lock_path):\n- stdoutlog.info(lockstr % self.lock_path)\n+ files = None\n+ while retries:\n+ files = glob.glob(self.pattern)\n+ if files and not files[0].endswith(self.end):\n+ stdoutlog.info(lockstr % str(files))\n stdoutlog.info(\"Sleeping for %s seconds\\n\" % sleeptime)\n-\n- time.sleep(sleeptime)\n+ sleep(sleeptime)\n sleeptime *= 2\n+ retries -= 1\n else:\n- os.makedirs(self.lock_path)\n- return self\n+ break\n+ else:\n+ stdoutlog.error(\"Exceeded max retries, giving up\")\n+ raise RuntimeError(lockstr % str(files))\n \n- stdoutlog.error(\"Exceeded max retries, giving up\")\n- raise LockError(lockstr % self.lock_path)\n+ if not files:\n+ try:\n+ os.makedirs(self.lock_path)\n+ except OSError:\n+ pass\n+ else: # PID lock already here --- someone else will remove it.\n+ self.remove = False\n \n def __exit__(self, exc_type, exc_value, traceback):\n- try:\n- os.rmdir(self.lock_path)\n- os.rmdir(self.path)\n- except OSError:\n- pass\n+ if self.remove:\n+ for path in self.lock_path, self.path:\n+ try:\n+ os.rmdir(path)\n+ except OSError:\n+ pass\n", "issue": "[Regression] Conda create environment fails on lock if root environment is not under user control\nThis issue is introduced in Conda 4.1.0 (Conda 4.0.8 works fine).\n\n```\n$ conda create -n root2 python=2 [123/1811]\nFetching package metadata .......\nSolving package specifications .............\n\nPackage plan for installation in environment /home/frol/.conda/envs/root2:\n\nThe following NEW packages will be INSTALLED:\n\n openssl: 1.0.2h-1 (soft-link)\n pip: 8.1.2-py27_0 (soft-link)\n python: 2.7.11-0 (soft-link)\n readline: 6.2-2 (soft-link)\n setuptools: 23.0.0-py27_0 (soft-link)\n sqlite: 3.13.0-0 (soft-link)\n tk: 8.5.18-0 
(soft-link)\n wheel: 0.29.0-py27_0 (soft-link)\n zlib: 1.2.8-3 (soft-link)\n\nProceed ([y]/n)?\n\nLinking packages ...\nAn unexpected error has occurred, please consider sending the\nfollowing traceback to the conda GitHub issue tracker at:\n\n https://github.com/conda/conda/issues\n\nInclude the output of the command 'conda info' in your report.\n\n\nTraceback (most recent call last):\n File \"/usr/local/miniconda/bin/conda\", line 6, in <module>\n sys.exit(main())\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main.py\", line 120, in main\n args_func(args, p)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main.py\", line 127, in args_func\n args.func(args, p)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main_create.py\", line 57, in execute\n install(args, parser, 'create')\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/install.py\", line 407, in install\n execute_actions(actions, index, verbose=not args.quiet)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/plan.py\", line 566, in execute_actions\n inst.execute_instructions(plan, index, verbose)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/instructions.py\", line 137, in execute_instructions\n cmd(state, arg)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/instructions.py\", line 80, in LINK_CMD\n link(state['prefix'], dist, lt, index=state['index'], shortcuts=shortcuts)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/install.py\", line 1035, in link\n with Locked(prefix), Locked(pkgs_dir):\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/lock.py\", line 60, in __enter__\n os.makedirs(self.lock_path)\n File \"/usr/local/miniconda/lib/python2.7/os.py\", line 157, in makedirs\n mkdir(name, mode)\nOSError: [Errno 13] Permission denied: '/usr/local/miniconda/pkgs/.conda_lock-949'\n```\n\n`/usr/local/miniconda/` is a system-wide installation of miniconda, so obviously, users cannot create lock files there.\n\nP.S. I have a dream that updating conda software won't break things on every release...\n\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nTools for working with locks\n\nA lock is just an empty directory. We use directories because this lets us use\nthe race condition-proof os.makedirs.\n\nFor now, there is one global lock for all of conda, because some things happen\nglobally (such as downloading packages).\n\nWe don't raise an error if the lock is named with the current PID\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport os\nimport time\n\nfrom .exceptions import LockError\n\nLOCKFN = '.conda_lock'\n\n\nstdoutlog = logging.getLogger('stdoutlog')\n\n\nclass Locked(object):\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n def __init__(self, path, retries=10):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n self.lock_path = os.path.join(self.path, LOCKFN + self.end)\n self.retries = retries\n\n def __enter__(self):\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n lockstr = (\"\"\"\\\n LOCKERROR: It looks like conda is already doing something.\n The lock %s was found. 
Wait for it to finish before continuing.\n If you are sure that conda is not running, remove it and try again.\n You can also use: $ conda clean --lock\\n\"\"\")\n sleeptime = 1\n\n for _ in range(self.retries):\n if os.path.isdir(self.lock_path):\n stdoutlog.info(lockstr % self.lock_path)\n stdoutlog.info(\"Sleeping for %s seconds\\n\" % sleeptime)\n\n time.sleep(sleeptime)\n sleeptime *= 2\n else:\n os.makedirs(self.lock_path)\n return self\n\n stdoutlog.error(\"Exceeded max retries, giving up\")\n raise LockError(lockstr % self.lock_path)\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n os.rmdir(self.lock_path)\n os.rmdir(self.path)\n except OSError:\n pass\n", "path": "conda/lock.py"}, {"content": "from __future__ import absolute_import, division, print_function\n\nclass CondaException(Exception):\n pass\n\n\nclass InvalidInstruction(CondaException):\n def __init__(self, instruction, *args, **kwargs):\n msg = \"No handler for instruction: %r\" % instruction\n super(InvalidInstruction, self).__init__(msg, *args, **kwargs)\n\nclass LockError(RuntimeError, CondaException):\n pass\n", "path": "conda/exceptions.py"}, {"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom io import open\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom .compat import itervalues\nfrom .install import linked_data\nfrom .misc import rel_path\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if sys.platform == 'win32':\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. 
Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(dist)\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}], "after_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nTools for working with locks\n\nA lock is just an empty directory. We use directories because this lets us use\nthe race condition-proof os.makedirs.\n\nFor now, there is one global lock for all of conda, because some things happen\nglobally (such as downloading packages).\n\nWe don't raise an error if the lock is named with the current PID\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport logging\nfrom os.path import join\nimport glob\nfrom time import sleep\n\nLOCKFN = '.conda_lock'\n\n\nstdoutlog = logging.getLogger('stdoutlog')\n\n\nclass Locked(object):\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n def __init__(self, path):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n self.lock_path = join(self.path, LOCKFN + self.end)\n self.pattern = join(self.path, LOCKFN + '-*')\n self.remove = True\n\n def __enter__(self):\n retries = 10\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n lockstr = (\"\"\"\\\n LOCKERROR: It looks like conda is already doing something.\n The lock %s was found. Wait for it to finish before continuing.\n If you are sure that conda is not running, remove it and try again.\n You can also use: $ conda clean --lock\\n\"\"\")\n sleeptime = 1\n files = None\n while retries:\n files = glob.glob(self.pattern)\n if files and not files[0].endswith(self.end):\n stdoutlog.info(lockstr % str(files))\n stdoutlog.info(\"Sleeping for %s seconds\\n\" % sleeptime)\n sleep(sleeptime)\n sleeptime *= 2\n retries -= 1\n else:\n break\n else:\n stdoutlog.error(\"Exceeded max retries, giving up\")\n raise RuntimeError(lockstr % str(files))\n\n if not files:\n try:\n os.makedirs(self.lock_path)\n except OSError:\n pass\n else: # PID lock already here --- someone else will remove it.\n self.remove = False\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.remove:\n for path in self.lock_path, self.path:\n try:\n os.rmdir(path)\n except OSError:\n pass\n", "path": "conda/lock.py"}, {"content": "from __future__ import absolute_import, division, print_function\n\nclass CondaException(Exception):\n pass\n\n\nclass InvalidInstruction(CondaException):\n def __init__(self, instruction, *args, **kwargs):\n msg = \"No handler for instruction: %r\" % instruction\n super(InvalidInstruction, self).__init__(msg, *args, **kwargs)\n", "path": "conda/exceptions.py"}, {"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. 
using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom io import open\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom .compat import itervalues\nfrom .install import linked_data\nfrom .misc import rel_path\n\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if sys.platform == 'win32':\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(dist)\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}]} | 2,762 | 865 |
gh_patches_debug_1954 | rasdani/github-patches | git_diff | ivy-llc__ivy-19363 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
T
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/jax/devicearray.py`
Content:
```
1 # global
2
3 # local
4 import ivy
5 import ivy.functional.frontends.jax as jax_frontend
6
7
8 class DeviceArray:
9 def __init__(self, array, weak_type=False):
10 self._ivy_array = array if isinstance(array, ivy.Array) else ivy.array(array)
11 self.weak_type = weak_type
12
13 def __repr__(self):
14 main = (
15 str(self.ivy_array.__repr__())
16 .replace("ivy.array", "ivy.frontends.jax.DeviceArray")
17 .replace(")", "")
18 + ", dtype="
19 + str(self.ivy_array.dtype)
20 )
21 if self.weak_type:
22 return main + ", weak_type=True)"
23 return main + ")"
24
25 # Properties #
26 # ---------- #
27
28 @property
29 def ivy_array(self):
30 return self._ivy_array
31
32 @property
33 def dtype(self):
34 return self.ivy_array.dtype
35
36 @property
37 def shape(self):
38 return self.ivy_array.shape
39
40 @property
41 def at(self):
42 return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)
43
44 # Instance Methods #
45 # ---------------- #
46
47 def all(self, *, axis=None, out=None, keepdims=False):
48 return jax_frontend.numpy.all(
49 self._ivy_array, axis=axis, keepdims=keepdims, out=out
50 )
51
52 def argmax(
53 self,
54 /,
55 *,
56 axis=None,
57 out=None,
58 keepdims=False,
59 ):
60 return jax_frontend.numpy.argmax(
61 self,
62 axis=axis,
63 out=out,
64 keepdims=keepdims,
65 )
66
67 def conj(self, /):
68 return jax_frontend.numpy.conj(self._ivy_array)
69
70 def conjugate(self, /):
71 return jax_frontend.numpy.conjugate(self._ivy_array)
72
73 def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, where=None):
74 return jax_frontend.numpy.mean(
75 self._ivy_array,
76 axis=axis,
77 dtype=dtype,
78 out=out,
79 keepdims=keepdims,
80 where=where,
81 )
82
83 def cumprod(self, axis=None, dtype=None, out=None):
84 return jax_frontend.numpy.cumprod(
85 self,
86 axis=axis,
87 dtype=dtype,
88 out=out,
89 )
90
91 def cumsum(self, axis=None, dtype=None, out=None):
92 return jax_frontend.numpy.cumsum(
93 self,
94 axis=axis,
95 dtype=dtype,
96 out=out,
97 )
98
99 def nonzero(self, *, size=None, fill_value=None):
100 return jax_frontend.numpy.nonzero(
101 self,
102 size=size,
103 fill_value=fill_value,
104 )
105
106 def ravel(self, order="C"):
107 return jax_frontend.numpy.ravel(
108 self,
109 order=order,
110 )
111
112 def sort(self, axis=-1, order=None):
113 return jax_frontend.numpy.sort(
114 self,
115 axis=axis,
116 order=order,
117 )
118
119 def __add__(self, other):
120 return jax_frontend.numpy.add(self, other)
121
122 def __radd__(self, other):
123 return jax_frontend.numpy.add(other, self)
124
125 def __sub__(self, other):
126 return jax_frontend.lax.sub(self, other)
127
128 def __rsub__(self, other):
129 return jax_frontend.lax.sub(other, self)
130
131 def __mul__(self, other):
132 return jax_frontend.lax.mul(self, other)
133
134 def __rmul__(self, other):
135 return jax_frontend.lax.mul(other, self)
136
137 def __div__(self, other):
138 return jax_frontend.numpy.divide(self, other)
139
140 def __rdiv__(self, other):
141 return jax_frontend.numpy.divide(other, self)
142
143 def __mod__(self, other):
144 return jax_frontend.numpy.mod(self, other)
145
146 def __rmod__(self, other):
147 return jax_frontend.numpy.mod(other, self)
148
149 def __truediv__(self, other):
150 return jax_frontend.numpy.divide(self, other)
151
152 def __rtruediv__(self, other):
153 return jax_frontend.numpy.divide(other, self)
154
155 def __matmul__(self, other):
156 return jax_frontend.numpy.dot(self, other)
157
158 def __rmatmul__(self, other):
159 return jax_frontend.numpy.dot(other, self)
160
161 def __pos__(self):
162 return self
163
164 def __neg__(self):
165 return jax_frontend.lax.neg(self)
166
167 def __eq__(self, other):
168 return jax_frontend.lax.eq(self, other)
169
170 def __ne__(self, other):
171 return jax_frontend.lax.ne(self, other)
172
173 def __lt__(self, other):
174 return jax_frontend.lax.lt(self, other)
175
176 def __le__(self, other):
177 return jax_frontend.lax.le(self, other)
178
179 def __gt__(self, other):
180 return jax_frontend.lax.gt(self, other)
181
182 def __ge__(self, other):
183 return jax_frontend.lax.ge(self, other)
184
185 def __abs__(self):
186 return jax_frontend.numpy.abs(self)
187
188 def __pow__(self, other):
189 return jax_frontend.lax.pow(self, other)
190
191 def __rpow__(self, other):
192 other = ivy.asarray(other)
193 return jax_frontend.lax.pow(other, self)
194
195 def __and__(self, other):
196 return jax_frontend.numpy.bitwise_and(self, other)
197
198 def __rand__(self, other):
199 return jax_frontend.numpy.bitwise_and(other, self)
200
201 def __or__(self, other):
202 return jax_frontend.numpy.bitwise_or(self, other)
203
204 def __ror__(self, other):
205 return jax_frontend.numpy.bitwise_or(other, self)
206
207 def __xor__(self, other):
208 return jax_frontend.lax.bitwise_xor(self, other)
209
210 def __rxor__(self, other):
211 return jax_frontend.lax.bitwise_xor(other, self)
212
213 def __invert__(self):
214 return jax_frontend.lax.bitwise_not(self)
215
216 def __lshift__(self, other):
217 return jax_frontend.lax.shift_left(self, other)
218
219 def __rlshift__(self, other):
220 return jax_frontend.lax.shift_left(other, self)
221
222 def __rshift__(self, other):
223 return jax_frontend.lax.shift_right_logical(self, other)
224
225 def __rrshift__(self, other):
226 return jax_frontend.lax.shift_right_logical(other, self)
227
228 def __getitem__(self, idx):
229 return self.at[idx].get()
230
231 def __setitem__(self, idx, val):
232 raise ivy.utils.exceptions.IvyException(
233 "ivy.functional.frontends.jax.DeviceArray object doesn't support assignment"
234 )
235
236 def __iter__(self):
237 ndim = len(self.shape)
238 if ndim == 0:
239 raise TypeError("iteration over a 0-d devicearray not supported")
240 for i in range(self.shape[0]):
241 yield self[i]
242
243 def round(self, decimals=0):
244 return jax_frontend.numpy.round(self, decimals)
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/jax/devicearray.py b/ivy/functional/frontends/jax/devicearray.py
--- a/ivy/functional/frontends/jax/devicearray.py
+++ b/ivy/functional/frontends/jax/devicearray.py
@@ -41,6 +41,10 @@
def at(self):
return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)
+ @property
+ def T(self):
+ return self.ivy_array.T
+
# Instance Methods #
# ---------------- #
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/devicearray.py b/ivy/functional/frontends/jax/devicearray.py\n--- a/ivy/functional/frontends/jax/devicearray.py\n+++ b/ivy/functional/frontends/jax/devicearray.py\n@@ -41,6 +41,10 @@\n def at(self):\n return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)\n \n+ @property\n+ def T(self):\n+ return self.ivy_array.T\n+\n # Instance Methods #\n # ---------------- #\n", "issue": "T\n\n", "before_files": [{"content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.jax as jax_frontend\n\n\nclass DeviceArray:\n def __init__(self, array, weak_type=False):\n self._ivy_array = array if isinstance(array, ivy.Array) else ivy.array(array)\n self.weak_type = weak_type\n\n def __repr__(self):\n main = (\n str(self.ivy_array.__repr__())\n .replace(\"ivy.array\", \"ivy.frontends.jax.DeviceArray\")\n .replace(\")\", \"\")\n + \", dtype=\"\n + str(self.ivy_array.dtype)\n )\n if self.weak_type:\n return main + \", weak_type=True)\"\n return main + \")\"\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def dtype(self):\n return self.ivy_array.dtype\n\n @property\n def shape(self):\n return self.ivy_array.shape\n\n @property\n def at(self):\n return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)\n\n # Instance Methods #\n # ---------------- #\n\n def all(self, *, axis=None, out=None, keepdims=False):\n return jax_frontend.numpy.all(\n self._ivy_array, axis=axis, keepdims=keepdims, out=out\n )\n\n def argmax(\n self,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n ):\n return jax_frontend.numpy.argmax(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n )\n\n def conj(self, /):\n return jax_frontend.numpy.conj(self._ivy_array)\n\n def conjugate(self, /):\n return jax_frontend.numpy.conjugate(self._ivy_array)\n\n def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, where=None):\n return jax_frontend.numpy.mean(\n self._ivy_array,\n axis=axis,\n dtype=dtype,\n out=out,\n keepdims=keepdims,\n where=where,\n )\n\n def cumprod(self, axis=None, dtype=None, out=None):\n return jax_frontend.numpy.cumprod(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def cumsum(self, axis=None, dtype=None, out=None):\n return jax_frontend.numpy.cumsum(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def nonzero(self, *, size=None, fill_value=None):\n return jax_frontend.numpy.nonzero(\n self,\n size=size,\n fill_value=fill_value,\n )\n\n def ravel(self, order=\"C\"):\n return jax_frontend.numpy.ravel(\n self,\n order=order,\n )\n\n def sort(self, axis=-1, order=None):\n return jax_frontend.numpy.sort(\n self,\n axis=axis,\n order=order,\n )\n\n def __add__(self, other):\n return jax_frontend.numpy.add(self, other)\n\n def __radd__(self, other):\n return jax_frontend.numpy.add(other, self)\n\n def __sub__(self, other):\n return jax_frontend.lax.sub(self, other)\n\n def __rsub__(self, other):\n return jax_frontend.lax.sub(other, self)\n\n def __mul__(self, other):\n return jax_frontend.lax.mul(self, other)\n\n def __rmul__(self, other):\n return jax_frontend.lax.mul(other, self)\n\n def __div__(self, other):\n return jax_frontend.numpy.divide(self, other)\n\n def __rdiv__(self, other):\n return jax_frontend.numpy.divide(other, self)\n\n def __mod__(self, other):\n return jax_frontend.numpy.mod(self, other)\n\n def __rmod__(self, other):\n return jax_frontend.numpy.mod(other, self)\n\n def __truediv__(self, other):\n 
return jax_frontend.numpy.divide(self, other)\n\n def __rtruediv__(self, other):\n return jax_frontend.numpy.divide(other, self)\n\n def __matmul__(self, other):\n return jax_frontend.numpy.dot(self, other)\n\n def __rmatmul__(self, other):\n return jax_frontend.numpy.dot(other, self)\n\n def __pos__(self):\n return self\n\n def __neg__(self):\n return jax_frontend.lax.neg(self)\n\n def __eq__(self, other):\n return jax_frontend.lax.eq(self, other)\n\n def __ne__(self, other):\n return jax_frontend.lax.ne(self, other)\n\n def __lt__(self, other):\n return jax_frontend.lax.lt(self, other)\n\n def __le__(self, other):\n return jax_frontend.lax.le(self, other)\n\n def __gt__(self, other):\n return jax_frontend.lax.gt(self, other)\n\n def __ge__(self, other):\n return jax_frontend.lax.ge(self, other)\n\n def __abs__(self):\n return jax_frontend.numpy.abs(self)\n\n def __pow__(self, other):\n return jax_frontend.lax.pow(self, other)\n\n def __rpow__(self, other):\n other = ivy.asarray(other)\n return jax_frontend.lax.pow(other, self)\n\n def __and__(self, other):\n return jax_frontend.numpy.bitwise_and(self, other)\n\n def __rand__(self, other):\n return jax_frontend.numpy.bitwise_and(other, self)\n\n def __or__(self, other):\n return jax_frontend.numpy.bitwise_or(self, other)\n\n def __ror__(self, other):\n return jax_frontend.numpy.bitwise_or(other, self)\n\n def __xor__(self, other):\n return jax_frontend.lax.bitwise_xor(self, other)\n\n def __rxor__(self, other):\n return jax_frontend.lax.bitwise_xor(other, self)\n\n def __invert__(self):\n return jax_frontend.lax.bitwise_not(self)\n\n def __lshift__(self, other):\n return jax_frontend.lax.shift_left(self, other)\n\n def __rlshift__(self, other):\n return jax_frontend.lax.shift_left(other, self)\n\n def __rshift__(self, other):\n return jax_frontend.lax.shift_right_logical(self, other)\n\n def __rrshift__(self, other):\n return jax_frontend.lax.shift_right_logical(other, self)\n\n def __getitem__(self, idx):\n return self.at[idx].get()\n\n def __setitem__(self, idx, val):\n raise ivy.utils.exceptions.IvyException(\n \"ivy.functional.frontends.jax.DeviceArray object doesn't support assignment\"\n )\n\n def __iter__(self):\n ndim = len(self.shape)\n if ndim == 0:\n raise TypeError(\"iteration over a 0-d devicearray not supported\")\n for i in range(self.shape[0]):\n yield self[i]\n\n def round(self, decimals=0):\n return jax_frontend.numpy.round(self, decimals)\n", "path": "ivy/functional/frontends/jax/devicearray.py"}], "after_files": [{"content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.jax as jax_frontend\n\n\nclass DeviceArray:\n def __init__(self, array, weak_type=False):\n self._ivy_array = array if isinstance(array, ivy.Array) else ivy.array(array)\n self.weak_type = weak_type\n\n def __repr__(self):\n main = (\n str(self.ivy_array.__repr__())\n .replace(\"ivy.array\", \"ivy.frontends.jax.DeviceArray\")\n .replace(\")\", \"\")\n + \", dtype=\"\n + str(self.ivy_array.dtype)\n )\n if self.weak_type:\n return main + \", weak_type=True)\"\n return main + \")\"\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def dtype(self):\n return self.ivy_array.dtype\n\n @property\n def shape(self):\n return self.ivy_array.shape\n\n @property\n def at(self):\n return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)\n\n @property\n def T(self):\n return self.ivy_array.T\n\n # Instance Methods #\n # ---------------- #\n\n def all(self, *, 
axis=None, out=None, keepdims=False):\n return jax_frontend.numpy.all(\n self._ivy_array, axis=axis, keepdims=keepdims, out=out\n )\n\n def argmax(\n self,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n ):\n return jax_frontend.numpy.argmax(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n )\n\n def conj(self, /):\n return jax_frontend.numpy.conj(self._ivy_array)\n\n def conjugate(self, /):\n return jax_frontend.numpy.conjugate(self._ivy_array)\n\n def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, where=None):\n return jax_frontend.numpy.mean(\n self._ivy_array,\n axis=axis,\n dtype=dtype,\n out=out,\n keepdims=keepdims,\n where=where,\n )\n\n def cumprod(self, axis=None, dtype=None, out=None):\n return jax_frontend.numpy.cumprod(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def cumsum(self, axis=None, dtype=None, out=None):\n return jax_frontend.numpy.cumsum(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def nonzero(self, *, size=None, fill_value=None):\n return jax_frontend.numpy.nonzero(\n self,\n size=size,\n fill_value=fill_value,\n )\n\n def ravel(self, order=\"C\"):\n return jax_frontend.numpy.ravel(\n self,\n order=order,\n )\n\n def sort(self, axis=-1, order=None):\n return jax_frontend.numpy.sort(\n self,\n axis=axis,\n order=order,\n )\n\n def __add__(self, other):\n return jax_frontend.numpy.add(self, other)\n\n def __radd__(self, other):\n return jax_frontend.numpy.add(other, self)\n\n def __sub__(self, other):\n return jax_frontend.lax.sub(self, other)\n\n def __rsub__(self, other):\n return jax_frontend.lax.sub(other, self)\n\n def __mul__(self, other):\n return jax_frontend.lax.mul(self, other)\n\n def __rmul__(self, other):\n return jax_frontend.lax.mul(other, self)\n\n def __div__(self, other):\n return jax_frontend.numpy.divide(self, other)\n\n def __rdiv__(self, other):\n return jax_frontend.numpy.divide(other, self)\n\n def __mod__(self, other):\n return jax_frontend.numpy.mod(self, other)\n\n def __rmod__(self, other):\n return jax_frontend.numpy.mod(other, self)\n\n def __truediv__(self, other):\n return jax_frontend.numpy.divide(self, other)\n\n def __rtruediv__(self, other):\n return jax_frontend.numpy.divide(other, self)\n\n def __matmul__(self, other):\n return jax_frontend.numpy.dot(self, other)\n\n def __rmatmul__(self, other):\n return jax_frontend.numpy.dot(other, self)\n\n def __pos__(self):\n return self\n\n def __neg__(self):\n return jax_frontend.lax.neg(self)\n\n def __eq__(self, other):\n return jax_frontend.lax.eq(self, other)\n\n def __ne__(self, other):\n return jax_frontend.lax.ne(self, other)\n\n def __lt__(self, other):\n return jax_frontend.lax.lt(self, other)\n\n def __le__(self, other):\n return jax_frontend.lax.le(self, other)\n\n def __gt__(self, other):\n return jax_frontend.lax.gt(self, other)\n\n def __ge__(self, other):\n return jax_frontend.lax.ge(self, other)\n\n def __abs__(self):\n return jax_frontend.numpy.abs(self)\n\n def __pow__(self, other):\n return jax_frontend.lax.pow(self, other)\n\n def __rpow__(self, other):\n other = ivy.asarray(other)\n return jax_frontend.lax.pow(other, self)\n\n def __and__(self, other):\n return jax_frontend.numpy.bitwise_and(self, other)\n\n def __rand__(self, other):\n return jax_frontend.numpy.bitwise_and(other, self)\n\n def __or__(self, other):\n return jax_frontend.numpy.bitwise_or(self, other)\n\n def __ror__(self, other):\n return jax_frontend.numpy.bitwise_or(other, self)\n\n def __xor__(self, other):\n return jax_frontend.lax.bitwise_xor(self, 
other)\n\n def __rxor__(self, other):\n return jax_frontend.lax.bitwise_xor(other, self)\n\n def __invert__(self):\n return jax_frontend.lax.bitwise_not(self)\n\n def __lshift__(self, other):\n return jax_frontend.lax.shift_left(self, other)\n\n def __rlshift__(self, other):\n return jax_frontend.lax.shift_left(other, self)\n\n def __rshift__(self, other):\n return jax_frontend.lax.shift_right_logical(self, other)\n\n def __rrshift__(self, other):\n return jax_frontend.lax.shift_right_logical(other, self)\n\n def __getitem__(self, idx):\n return self.at[idx].get()\n\n def __setitem__(self, idx, val):\n raise ivy.utils.exceptions.IvyException(\n \"ivy.functional.frontends.jax.DeviceArray object doesn't support assignment\"\n )\n\n def __iter__(self):\n ndim = len(self.shape)\n if ndim == 0:\n raise TypeError(\"iteration over a 0-d devicearray not supported\")\n for i in range(self.shape[0]):\n yield self[i]\n\n def round(self, decimals=0):\n return jax_frontend.numpy.round(self, decimals)\n", "path": "ivy/functional/frontends/jax/devicearray.py"}]} | 2,492 | 125 |
gh_patches_debug_22503 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10012 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because it lacks any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
    Traceback (most recent call last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynamic_programming/minimum_partition.py`
Content:
```
1 """
2 Partition a set into two subsets such that the difference of subset sums is minimum
3 """
4
5
6 def find_min(arr):
7 n = len(arr)
8 s = sum(arr)
9
10 dp = [[False for x in range(s + 1)] for y in range(n + 1)]
11
12 for i in range(1, n + 1):
13 dp[i][0] = True
14
15 for i in range(1, s + 1):
16 dp[0][i] = False
17
18 for i in range(1, n + 1):
19 for j in range(1, s + 1):
20 dp[i][j] = dp[i][j - 1]
21
22 if arr[i - 1] <= j:
23 dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
24
25 for j in range(int(s / 2), -1, -1):
26 if dp[n][j] is True:
27 diff = s - 2 * j
28 break
29
30 return diff
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py
--- a/dynamic_programming/minimum_partition.py
+++ b/dynamic_programming/minimum_partition.py
@@ -3,13 +3,25 @@
"""
-def find_min(arr):
+def find_min(arr: list[int]) -> int:
+ """
+ >>> find_min([1, 2, 3, 4, 5])
+ 1
+ >>> find_min([5, 5, 5, 5, 5])
+ 5
+ >>> find_min([5, 5, 5, 5])
+ 0
+ >>> find_min([3])
+ 3
+ >>> find_min([])
+ 0
+ """
n = len(arr)
s = sum(arr)
dp = [[False for x in range(s + 1)] for y in range(n + 1)]
- for i in range(1, n + 1):
+ for i in range(n + 1):
dp[i][0] = True
for i in range(1, s + 1):
@@ -17,7 +29,7 @@
for i in range(1, n + 1):
for j in range(1, s + 1):
- dp[i][j] = dp[i][j - 1]
+ dp[i][j] = dp[i - 1][j]
if arr[i - 1] <= j:
dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
@@ -28,3 +40,9 @@
break
return diff
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
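The `find_min([5, 5, 5, 5, 5])` doctest is the one that exposes the original bug: copying reachability sideways via `dp[i][j - 1]` eventually marks every sum as reachable, so the reported difference collapses to `s % 2`, whereas inheriting `dp[i - 1][j]` from the previous row gives the true answer of 5. A minimal standalone sketch of the two update rules (the helper name here is invented for illustration):
```py
def reachable_sums(arr, fixed=True):
    """Subset sums reachable under either update rule."""
    n, s = len(arr), sum(arr)
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # fixed rule inherits from the previous row; the old rule copied the previous column
            dp[i][j] = dp[i - 1][j] if fixed else dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    return {j for j in range(s + 1) if dp[n][j]}

print(reachable_sums([5, 5, 5, 5, 5], fixed=True))   # {0, 5, 10, 15, 20, 25} -> difference 5
print(reachable_sums([5, 5, 5, 5, 5], fixed=False))  # every sum 0..25 -> difference 1
```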
| {"golden_diff": "diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py\n--- a/dynamic_programming/minimum_partition.py\n+++ b/dynamic_programming/minimum_partition.py\n@@ -3,13 +3,25 @@\n \"\"\"\n \n \n-def find_min(arr):\n+def find_min(arr: list[int]) -> int:\n+ \"\"\"\n+ >>> find_min([1, 2, 3, 4, 5])\n+ 1\n+ >>> find_min([5, 5, 5, 5, 5])\n+ 5\n+ >>> find_min([5, 5, 5, 5])\n+ 0\n+ >>> find_min([3])\n+ 3\n+ >>> find_min([])\n+ 0\n+ \"\"\"\n n = len(arr)\n s = sum(arr)\n \n dp = [[False for x in range(s + 1)] for y in range(n + 1)]\n \n- for i in range(1, n + 1):\n+ for i in range(n + 1):\n dp[i][0] = True\n \n for i in range(1, s + 1):\n@@ -17,7 +29,7 @@\n \n for i in range(1, n + 1):\n for j in range(1, s + 1):\n- dp[i][j] = dp[i][j - 1]\n+ dp[i][j] = dp[i - 1][j]\n \n if arr[i - 1] <= j:\n dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]\n@@ -28,3 +40,9 @@\n break\n \n return diff\n+\n+\n+if __name__ == \"__main__\":\n+ from doctest import testmod\n+\n+ testmod()\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. 
Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "\"\"\"\nPartition a set into two subsets such that the difference of subset sums is minimum\n\"\"\"\n\n\ndef find_min(arr):\n n = len(arr)\n s = sum(arr)\n\n dp = [[False for x in range(s + 1)] for y in range(n + 1)]\n\n for i in range(1, n + 1):\n dp[i][0] = True\n\n for i in range(1, s + 1):\n dp[0][i] = False\n\n for i in range(1, n + 1):\n for j in range(1, s + 1):\n dp[i][j] = dp[i][j - 1]\n\n if arr[i - 1] <= j:\n dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]\n\n for j in range(int(s / 2), -1, -1):\n if dp[n][j] is True:\n diff = s - 2 * j\n break\n\n return diff\n", "path": "dynamic_programming/minimum_partition.py"}], "after_files": [{"content": "\"\"\"\nPartition a set into two subsets such that the difference of subset sums is minimum\n\"\"\"\n\n\ndef find_min(arr: list[int]) -> int:\n \"\"\"\n >>> find_min([1, 2, 3, 4, 5])\n 1\n >>> find_min([5, 5, 5, 5, 5])\n 5\n >>> find_min([5, 5, 5, 5])\n 0\n >>> find_min([3])\n 3\n >>> find_min([])\n 0\n \"\"\"\n n = len(arr)\n s = sum(arr)\n\n dp = [[False for x in range(s + 1)] for y in range(n + 1)]\n\n for i in range(n + 1):\n dp[i][0] = True\n\n for i in range(1, s + 1):\n dp[0][i] = False\n\n for i in range(1, n + 1):\n for j in range(1, s + 1):\n dp[i][j] = dp[i - 1][j]\n\n if arr[i - 1] <= j:\n dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]\n\n for j in range(int(s / 2), -1, -1):\n if dp[n][j] is True:\n diff = s - 2 * j\n break\n\n return diff\n\n\nif __name__ == \"__main__\":\n from doctest import testmod\n\n testmod()\n", "path": "dynamic_programming/minimum_partition.py"}]} | 1,390 | 416 |
gh_patches_debug_11996 | rasdani/github-patches | git_diff | pypa__pip-4425 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip install -h output is wrong about pyc
* Pip version: 9.0.1
* Python version: 3.5
* Operating system: Linux
### Description:
pip install -h says
--compile Compile py files to pyc
--no-compile Do not compile py files to pyc
pyc is only correct for some Python interpreters. Not for Python, not for windows - I never tried pypy or others. And somebody may set PYTHONOPTIMIZE and get pyo instead of pyc.
Maybe py could also vary, what about pyw on Windows? There could be more in the future.
A correct version could perhaps be
Compile Python source files to byte code
```
pip install -h
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/commands/install.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import operator
5 import os
6 import tempfile
7 import shutil
8 try:
9 import wheel
10 except ImportError:
11 wheel = None
12
13 from pip.req import RequirementSet
14 from pip.basecommand import RequirementCommand
15 from pip.locations import virtualenv_no_global, distutils_scheme
16 from pip.exceptions import (
17 InstallationError, CommandError, PreviousBuildDirError,
18 )
19 from pip import cmdoptions
20 from pip.utils import ensure_dir, get_installed_version
21 from pip.utils.build import BuildDirectory
22 from pip.utils.filesystem import check_path_owner
23 from pip.wheel import WheelCache, WheelBuilder
24
25
26 logger = logging.getLogger(__name__)
27
28
29 class InstallCommand(RequirementCommand):
30 """
31 Install packages from:
32
33 - PyPI (and other indexes) using requirement specifiers.
34 - VCS project urls.
35 - Local project directories.
36 - Local or remote source archives.
37
38 pip also supports installing from "requirements files", which provide
39 an easy way to specify a whole environment to be installed.
40 """
41 name = 'install'
42
43 usage = """
44 %prog [options] <requirement specifier> [package-index-options] ...
45 %prog [options] -r <requirements file> [package-index-options] ...
46 %prog [options] [-e] <vcs project url> ...
47 %prog [options] [-e] <local project path> ...
48 %prog [options] <archive url/path> ..."""
49
50 summary = 'Install packages.'
51
52 def __init__(self, *args, **kw):
53 super(InstallCommand, self).__init__(*args, **kw)
54
55 cmd_opts = self.cmd_opts
56
57 cmd_opts.add_option(cmdoptions.requirements())
58 cmd_opts.add_option(cmdoptions.constraints())
59 cmd_opts.add_option(cmdoptions.no_deps())
60 cmd_opts.add_option(cmdoptions.pre())
61
62 cmd_opts.add_option(cmdoptions.editable())
63 cmd_opts.add_option(
64 '-t', '--target',
65 dest='target_dir',
66 metavar='dir',
67 default=None,
68 help='Install packages into <dir>. '
69 'By default this will not replace existing files/folders in '
70 '<dir>. Use --upgrade to replace existing packages in <dir> '
71 'with new versions.'
72 )
73 cmd_opts.add_option(
74 '--user',
75 dest='use_user_site',
76 action='store_true',
77 help="Install to the Python user install directory for your "
78 "platform. Typically ~/.local/, or %APPDATA%\\Python on "
79 "Windows. (See the Python documentation for site.USER_BASE "
80 "for full details.)")
81 cmd_opts.add_option(
82 '--root',
83 dest='root_path',
84 metavar='dir',
85 default=None,
86 help="Install everything relative to this alternate root "
87 "directory.")
88 cmd_opts.add_option(
89 '--prefix',
90 dest='prefix_path',
91 metavar='dir',
92 default=None,
93 help="Installation prefix where lib, bin and other top-level "
94 "folders are placed")
95
96 cmd_opts.add_option(cmdoptions.build_dir())
97
98 cmd_opts.add_option(cmdoptions.src())
99
100 cmd_opts.add_option(
101 '-U', '--upgrade',
102 dest='upgrade',
103 action='store_true',
104 help='Upgrade all specified packages to the newest available '
105 'version. The handling of dependencies depends on the '
106 'upgrade-strategy used.'
107 )
108
109 cmd_opts.add_option(
110 '--upgrade-strategy',
111 dest='upgrade_strategy',
112 default='eager',
113 choices=['only-if-needed', 'eager'],
114 help='Determines how dependency upgrading should be handled '
115 '(default: %(default)s). '
116 '"eager" - dependencies are upgraded regardless of '
117 'whether the currently installed version satisfies the '
118 'requirements of the upgraded package(s). '
119 '"only-if-needed" - are upgraded only when they do not '
120 'satisfy the requirements of the upgraded package(s).'
121 )
122
123 cmd_opts.add_option(
124 '--force-reinstall',
125 dest='force_reinstall',
126 action='store_true',
127 help='When upgrading, reinstall all packages even if they are '
128 'already up-to-date.')
129
130 cmd_opts.add_option(
131 '-I', '--ignore-installed',
132 dest='ignore_installed',
133 action='store_true',
134 help='Ignore the installed packages (reinstalling instead).')
135
136 cmd_opts.add_option(cmdoptions.ignore_requires_python())
137
138 cmd_opts.add_option(cmdoptions.install_options())
139 cmd_opts.add_option(cmdoptions.global_options())
140
141 cmd_opts.add_option(
142 "--compile",
143 action="store_true",
144 dest="compile",
145 default=True,
146 help="Compile py files to pyc",
147 )
148
149 cmd_opts.add_option(
150 "--no-compile",
151 action="store_false",
152 dest="compile",
153 help="Do not compile py files to pyc",
154 )
155
156 cmd_opts.add_option(cmdoptions.no_binary())
157 cmd_opts.add_option(cmdoptions.only_binary())
158 cmd_opts.add_option(cmdoptions.no_clean())
159 cmd_opts.add_option(cmdoptions.require_hashes())
160 cmd_opts.add_option(cmdoptions.progress_bar())
161
162 index_opts = cmdoptions.make_option_group(
163 cmdoptions.index_group,
164 self.parser,
165 )
166
167 self.parser.insert_option_group(0, index_opts)
168 self.parser.insert_option_group(0, cmd_opts)
169
170 def run(self, options, args):
171 cmdoptions.check_install_build_global(options)
172
173 if options.build_dir:
174 options.build_dir = os.path.abspath(options.build_dir)
175
176 options.src_dir = os.path.abspath(options.src_dir)
177 install_options = options.install_options or []
178 if options.use_user_site:
179 if options.prefix_path:
180 raise CommandError(
181 "Can not combine '--user' and '--prefix' as they imply "
182 "different installation locations"
183 )
184 if virtualenv_no_global():
185 raise InstallationError(
186 "Can not perform a '--user' install. User site-packages "
187 "are not visible in this virtualenv."
188 )
189 install_options.append('--user')
190 install_options.append('--prefix=')
191
192 temp_target_dir = None
193 if options.target_dir:
194 options.ignore_installed = True
195 temp_target_dir = tempfile.mkdtemp()
196 options.target_dir = os.path.abspath(options.target_dir)
197 if (os.path.exists(options.target_dir) and not
198 os.path.isdir(options.target_dir)):
199 raise CommandError(
200 "Target path exists but is not a directory, will not "
201 "continue."
202 )
203 install_options.append('--home=' + temp_target_dir)
204
205 global_options = options.global_options or []
206
207 with self._build_session(options) as session:
208
209 finder = self._build_package_finder(options, session)
210 build_delete = (not (options.no_clean or options.build_dir))
211 wheel_cache = WheelCache(options.cache_dir, options.format_control)
212 if options.cache_dir and not check_path_owner(options.cache_dir):
213 logger.warning(
214 "The directory '%s' or its parent directory is not owned "
215 "by the current user and caching wheels has been "
216 "disabled. check the permissions and owner of that "
217 "directory. If executing pip with sudo, you may want "
218 "sudo's -H flag.",
219 options.cache_dir,
220 )
221 options.cache_dir = None
222
223 with BuildDirectory(options.build_dir,
224 delete=build_delete) as build_dir:
225 requirement_set = RequirementSet(
226 build_dir=build_dir,
227 src_dir=options.src_dir,
228 upgrade=options.upgrade,
229 upgrade_strategy=options.upgrade_strategy,
230 ignore_installed=options.ignore_installed,
231 ignore_dependencies=options.ignore_dependencies,
232 ignore_requires_python=options.ignore_requires_python,
233 force_reinstall=options.force_reinstall,
234 use_user_site=options.use_user_site,
235 target_dir=temp_target_dir,
236 session=session,
237 pycompile=options.compile,
238 isolated=options.isolated_mode,
239 wheel_cache=wheel_cache,
240 require_hashes=options.require_hashes,
241 progress_bar=options.progress_bar,
242 )
243
244 self.populate_requirement_set(
245 requirement_set, args, options, finder, session, self.name,
246 wheel_cache
247 )
248
249 try:
250 if (not wheel or not options.cache_dir):
251 # on -d don't do complex things like building
252 # wheels, and don't try to build wheels when wheel is
253 # not installed.
254 requirement_set.prepare_files(finder)
255 else:
256 # build wheels before install.
257 wb = WheelBuilder(
258 requirement_set,
259 finder,
260 build_options=[],
261 global_options=[],
262 )
263 # Ignore the result: a failed wheel will be
264 # installed from the sdist/vcs whatever.
265 wb.build(autobuilding=True)
266
267 requirement_set.install(
268 install_options,
269 global_options,
270 root=options.root_path,
271 prefix=options.prefix_path,
272 )
273
274 possible_lib_locations = get_lib_location_guesses(
275 user=options.use_user_site,
276 home=temp_target_dir,
277 root=options.root_path,
278 prefix=options.prefix_path,
279 isolated=options.isolated_mode,
280 )
281 reqs = sorted(
282 requirement_set.successfully_installed,
283 key=operator.attrgetter('name'))
284 items = []
285 for req in reqs:
286 item = req.name
287 try:
288 installed_version = get_installed_version(
289 req.name, possible_lib_locations
290 )
291 if installed_version:
292 item += '-' + installed_version
293 except Exception:
294 pass
295 items.append(item)
296 installed = ' '.join(items)
297 if installed:
298 logger.info('Successfully installed %s', installed)
299 except PreviousBuildDirError:
300 options.no_clean = True
301 raise
302 finally:
303 # Clean up
304 if not options.no_clean:
305 requirement_set.cleanup_files()
306
307 if options.target_dir:
308 ensure_dir(options.target_dir)
309
310 # Checking both purelib and platlib directories for installed
311 # packages to be moved to target directory
312 lib_dir_list = []
313
314 purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
315 platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib']
316 data_dir = distutils_scheme('', home=temp_target_dir)['data']
317
318 if os.path.exists(purelib_dir):
319 lib_dir_list.append(purelib_dir)
320 if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
321 lib_dir_list.append(platlib_dir)
322 if os.path.exists(data_dir):
323 lib_dir_list.append(data_dir)
324
325 for lib_dir in lib_dir_list:
326 for item in os.listdir(lib_dir):
327 if lib_dir == data_dir:
328 ddir = os.path.join(data_dir, item)
329 if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
330 continue
331 target_item_dir = os.path.join(options.target_dir, item)
332 if os.path.exists(target_item_dir):
333 if not options.upgrade:
334 logger.warning(
335 'Target directory %s already exists. Specify '
336 '--upgrade to force replacement.',
337 target_item_dir
338 )
339 continue
340 if os.path.islink(target_item_dir):
341 logger.warning(
342 'Target directory %s already exists and is '
343 'a link. Pip will not automatically replace '
344 'links, please remove if replacement is '
345 'desired.',
346 target_item_dir
347 )
348 continue
349 if os.path.isdir(target_item_dir):
350 shutil.rmtree(target_item_dir)
351 else:
352 os.remove(target_item_dir)
353
354 shutil.move(
355 os.path.join(lib_dir, item),
356 target_item_dir
357 )
358 shutil.rmtree(temp_target_dir)
359 return requirement_set
360
361
362 def get_lib_location_guesses(*args, **kwargs):
363 scheme = distutils_scheme('', *args, **kwargs)
364 return [scheme['purelib'], scheme['platlib']]
365
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/commands/install.py b/pip/commands/install.py
--- a/pip/commands/install.py
+++ b/pip/commands/install.py
@@ -143,14 +143,14 @@
action="store_true",
dest="compile",
default=True,
- help="Compile py files to pyc",
+ help="Compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
- help="Do not compile py files to pyc",
+ help="Do not compile Python source files to bytecode",
)
cmd_opts.add_option(cmdoptions.no_binary())
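The change is only to the help strings, but the new wording is the accurate one: the byte-code file name is chosen by the interpreter, not by pip. A quick way to see the current interpreter's convention (the sample output is what CPython 3.5 prints; PyPy and others substitute their own cache tag):
```py
import importlib.util

print(importlib.util.cache_from_source('mod.py'))
# e.g. '__pycache__/mod.cpython-35.pyc' on CPython 3.5
```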
| {"golden_diff": "diff --git a/pip/commands/install.py b/pip/commands/install.py\n--- a/pip/commands/install.py\n+++ b/pip/commands/install.py\n@@ -143,14 +143,14 @@\n action=\"store_true\",\n dest=\"compile\",\n default=True,\n- help=\"Compile py files to pyc\",\n+ help=\"Compile Python source files to bytecode\",\n )\n \n cmd_opts.add_option(\n \"--no-compile\",\n action=\"store_false\",\n dest=\"compile\",\n- help=\"Do not compile py files to pyc\",\n+ help=\"Do not compile Python source files to bytecode\",\n )\n \n cmd_opts.add_option(cmdoptions.no_binary())\n", "issue": "pip install -h output is wrong about pyc\n* Pip version: 9.0.1\r\n* Python version: 3.5\r\n* Operating system: Linux\r\n\r\n### Description:\r\npip install -h says\r\n\r\n --compile Compile py files to pyc\r\n --no-compile Do not compile py files to pyc\r\n\r\npyc is only correct for some Python interpreters. Not for Python, not for windows - I never tried pypy or others. And somebody may set PYTHONOPTIMIZE and get pyo instead of pyc.\r\n\r\nMaybe py could also vary, what about pyw on Windows? There could be more in the future.\r\n\r\nA correct version could perhaps be\r\n\r\nCompile Python source files to byte code\r\n\r\n```\r\npip install -h\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport operator\nimport os\nimport tempfile\nimport shutil\ntry:\n import wheel\nexcept ImportError:\n wheel = None\n\nfrom pip.req import RequirementSet\nfrom pip.basecommand import RequirementCommand\nfrom pip.locations import virtualenv_no_global, distutils_scheme\nfrom pip.exceptions import (\n InstallationError, CommandError, PreviousBuildDirError,\n)\nfrom pip import cmdoptions\nfrom pip.utils import ensure_dir, get_installed_version\nfrom pip.utils.build import BuildDirectory\nfrom pip.utils.filesystem import check_path_owner\nfrom pip.wheel import WheelCache, WheelBuilder\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass InstallCommand(RequirementCommand):\n \"\"\"\n Install packages from:\n\n - PyPI (and other indexes) using requirement specifiers.\n - VCS project urls.\n - Local project directories.\n - Local or remote source archives.\n\n pip also supports installing from \"requirements files\", which provide\n an easy way to specify a whole environment to be installed.\n \"\"\"\n name = 'install'\n\n usage = \"\"\"\n %prog [options] <requirement specifier> [package-index-options] ...\n %prog [options] -r <requirements file> [package-index-options] ...\n %prog [options] [-e] <vcs project url> ...\n %prog [options] [-e] <local project path> ...\n %prog [options] <archive url/path> ...\"\"\"\n\n summary = 'Install packages.'\n\n def __init__(self, *args, **kw):\n super(InstallCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n\n cmd_opts.add_option(cmdoptions.requirements())\n cmd_opts.add_option(cmdoptions.constraints())\n cmd_opts.add_option(cmdoptions.no_deps())\n cmd_opts.add_option(cmdoptions.pre())\n\n cmd_opts.add_option(cmdoptions.editable())\n cmd_opts.add_option(\n '-t', '--target',\n dest='target_dir',\n metavar='dir',\n default=None,\n help='Install packages into <dir>. '\n 'By default this will not replace existing files/folders in '\n '<dir>. Use --upgrade to replace existing packages in <dir> '\n 'with new versions.'\n )\n cmd_opts.add_option(\n '--user',\n dest='use_user_site',\n action='store_true',\n help=\"Install to the Python user install directory for your \"\n \"platform. 
Typically ~/.local/, or %APPDATA%\\\\Python on \"\n \"Windows. (See the Python documentation for site.USER_BASE \"\n \"for full details.)\")\n cmd_opts.add_option(\n '--root',\n dest='root_path',\n metavar='dir',\n default=None,\n help=\"Install everything relative to this alternate root \"\n \"directory.\")\n cmd_opts.add_option(\n '--prefix',\n dest='prefix_path',\n metavar='dir',\n default=None,\n help=\"Installation prefix where lib, bin and other top-level \"\n \"folders are placed\")\n\n cmd_opts.add_option(cmdoptions.build_dir())\n\n cmd_opts.add_option(cmdoptions.src())\n\n cmd_opts.add_option(\n '-U', '--upgrade',\n dest='upgrade',\n action='store_true',\n help='Upgrade all specified packages to the newest available '\n 'version. The handling of dependencies depends on the '\n 'upgrade-strategy used.'\n )\n\n cmd_opts.add_option(\n '--upgrade-strategy',\n dest='upgrade_strategy',\n default='eager',\n choices=['only-if-needed', 'eager'],\n help='Determines how dependency upgrading should be handled '\n '(default: %(default)s). '\n '\"eager\" - dependencies are upgraded regardless of '\n 'whether the currently installed version satisfies the '\n 'requirements of the upgraded package(s). '\n '\"only-if-needed\" - are upgraded only when they do not '\n 'satisfy the requirements of the upgraded package(s).'\n )\n\n cmd_opts.add_option(\n '--force-reinstall',\n dest='force_reinstall',\n action='store_true',\n help='When upgrading, reinstall all packages even if they are '\n 'already up-to-date.')\n\n cmd_opts.add_option(\n '-I', '--ignore-installed',\n dest='ignore_installed',\n action='store_true',\n help='Ignore the installed packages (reinstalling instead).')\n\n cmd_opts.add_option(cmdoptions.ignore_requires_python())\n\n cmd_opts.add_option(cmdoptions.install_options())\n cmd_opts.add_option(cmdoptions.global_options())\n\n cmd_opts.add_option(\n \"--compile\",\n action=\"store_true\",\n dest=\"compile\",\n default=True,\n help=\"Compile py files to pyc\",\n )\n\n cmd_opts.add_option(\n \"--no-compile\",\n action=\"store_false\",\n dest=\"compile\",\n help=\"Do not compile py files to pyc\",\n )\n\n cmd_opts.add_option(cmdoptions.no_binary())\n cmd_opts.add_option(cmdoptions.only_binary())\n cmd_opts.add_option(cmdoptions.no_clean())\n cmd_opts.add_option(cmdoptions.require_hashes())\n cmd_opts.add_option(cmdoptions.progress_bar())\n\n index_opts = cmdoptions.make_option_group(\n cmdoptions.index_group,\n self.parser,\n )\n\n self.parser.insert_option_group(0, index_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def run(self, options, args):\n cmdoptions.check_install_build_global(options)\n\n if options.build_dir:\n options.build_dir = os.path.abspath(options.build_dir)\n\n options.src_dir = os.path.abspath(options.src_dir)\n install_options = options.install_options or []\n if options.use_user_site:\n if options.prefix_path:\n raise CommandError(\n \"Can not combine '--user' and '--prefix' as they imply \"\n \"different installation locations\"\n )\n if virtualenv_no_global():\n raise InstallationError(\n \"Can not perform a '--user' install. 
User site-packages \"\n \"are not visible in this virtualenv.\"\n )\n install_options.append('--user')\n install_options.append('--prefix=')\n\n temp_target_dir = None\n if options.target_dir:\n options.ignore_installed = True\n temp_target_dir = tempfile.mkdtemp()\n options.target_dir = os.path.abspath(options.target_dir)\n if (os.path.exists(options.target_dir) and not\n os.path.isdir(options.target_dir)):\n raise CommandError(\n \"Target path exists but is not a directory, will not \"\n \"continue.\"\n )\n install_options.append('--home=' + temp_target_dir)\n\n global_options = options.global_options or []\n\n with self._build_session(options) as session:\n\n finder = self._build_package_finder(options, session)\n build_delete = (not (options.no_clean or options.build_dir))\n wheel_cache = WheelCache(options.cache_dir, options.format_control)\n if options.cache_dir and not check_path_owner(options.cache_dir):\n logger.warning(\n \"The directory '%s' or its parent directory is not owned \"\n \"by the current user and caching wheels has been \"\n \"disabled. check the permissions and owner of that \"\n \"directory. If executing pip with sudo, you may want \"\n \"sudo's -H flag.\",\n options.cache_dir,\n )\n options.cache_dir = None\n\n with BuildDirectory(options.build_dir,\n delete=build_delete) as build_dir:\n requirement_set = RequirementSet(\n build_dir=build_dir,\n src_dir=options.src_dir,\n upgrade=options.upgrade,\n upgrade_strategy=options.upgrade_strategy,\n ignore_installed=options.ignore_installed,\n ignore_dependencies=options.ignore_dependencies,\n ignore_requires_python=options.ignore_requires_python,\n force_reinstall=options.force_reinstall,\n use_user_site=options.use_user_site,\n target_dir=temp_target_dir,\n session=session,\n pycompile=options.compile,\n isolated=options.isolated_mode,\n wheel_cache=wheel_cache,\n require_hashes=options.require_hashes,\n progress_bar=options.progress_bar,\n )\n\n self.populate_requirement_set(\n requirement_set, args, options, finder, session, self.name,\n wheel_cache\n )\n\n try:\n if (not wheel or not options.cache_dir):\n # on -d don't do complex things like building\n # wheels, and don't try to build wheels when wheel is\n # not installed.\n requirement_set.prepare_files(finder)\n else:\n # build wheels before install.\n wb = WheelBuilder(\n requirement_set,\n finder,\n build_options=[],\n global_options=[],\n )\n # Ignore the result: a failed wheel will be\n # installed from the sdist/vcs whatever.\n wb.build(autobuilding=True)\n\n requirement_set.install(\n install_options,\n global_options,\n root=options.root_path,\n prefix=options.prefix_path,\n )\n\n possible_lib_locations = get_lib_location_guesses(\n user=options.use_user_site,\n home=temp_target_dir,\n root=options.root_path,\n prefix=options.prefix_path,\n isolated=options.isolated_mode,\n )\n reqs = sorted(\n requirement_set.successfully_installed,\n key=operator.attrgetter('name'))\n items = []\n for req in reqs:\n item = req.name\n try:\n installed_version = get_installed_version(\n req.name, possible_lib_locations\n )\n if installed_version:\n item += '-' + installed_version\n except Exception:\n pass\n items.append(item)\n installed = ' '.join(items)\n if installed:\n logger.info('Successfully installed %s', installed)\n except PreviousBuildDirError:\n options.no_clean = True\n raise\n finally:\n # Clean up\n if not options.no_clean:\n requirement_set.cleanup_files()\n\n if options.target_dir:\n ensure_dir(options.target_dir)\n\n # Checking both purelib and platlib 
directories for installed\n # packages to be moved to target directory\n lib_dir_list = []\n\n purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib']\n platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib']\n data_dir = distutils_scheme('', home=temp_target_dir)['data']\n\n if os.path.exists(purelib_dir):\n lib_dir_list.append(purelib_dir)\n if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:\n lib_dir_list.append(platlib_dir)\n if os.path.exists(data_dir):\n lib_dir_list.append(data_dir)\n\n for lib_dir in lib_dir_list:\n for item in os.listdir(lib_dir):\n if lib_dir == data_dir:\n ddir = os.path.join(data_dir, item)\n if any(s.startswith(ddir) for s in lib_dir_list[:-1]):\n continue\n target_item_dir = os.path.join(options.target_dir, item)\n if os.path.exists(target_item_dir):\n if not options.upgrade:\n logger.warning(\n 'Target directory %s already exists. Specify '\n '--upgrade to force replacement.',\n target_item_dir\n )\n continue\n if os.path.islink(target_item_dir):\n logger.warning(\n 'Target directory %s already exists and is '\n 'a link. Pip will not automatically replace '\n 'links, please remove if replacement is '\n 'desired.',\n target_item_dir\n )\n continue\n if os.path.isdir(target_item_dir):\n shutil.rmtree(target_item_dir)\n else:\n os.remove(target_item_dir)\n\n shutil.move(\n os.path.join(lib_dir, item),\n target_item_dir\n )\n shutil.rmtree(temp_target_dir)\n return requirement_set\n\n\ndef get_lib_location_guesses(*args, **kwargs):\n scheme = distutils_scheme('', *args, **kwargs)\n return [scheme['purelib'], scheme['platlib']]\n", "path": "pip/commands/install.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport operator\nimport os\nimport tempfile\nimport shutil\ntry:\n import wheel\nexcept ImportError:\n wheel = None\n\nfrom pip.req import RequirementSet\nfrom pip.basecommand import RequirementCommand\nfrom pip.locations import virtualenv_no_global, distutils_scheme\nfrom pip.exceptions import (\n InstallationError, CommandError, PreviousBuildDirError,\n)\nfrom pip import cmdoptions\nfrom pip.utils import ensure_dir, get_installed_version\nfrom pip.utils.build import BuildDirectory\nfrom pip.utils.filesystem import check_path_owner\nfrom pip.wheel import WheelCache, WheelBuilder\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass InstallCommand(RequirementCommand):\n \"\"\"\n Install packages from:\n\n - PyPI (and other indexes) using requirement specifiers.\n - VCS project urls.\n - Local project directories.\n - Local or remote source archives.\n\n pip also supports installing from \"requirements files\", which provide\n an easy way to specify a whole environment to be installed.\n \"\"\"\n name = 'install'\n\n usage = \"\"\"\n %prog [options] <requirement specifier> [package-index-options] ...\n %prog [options] -r <requirements file> [package-index-options] ...\n %prog [options] [-e] <vcs project url> ...\n %prog [options] [-e] <local project path> ...\n %prog [options] <archive url/path> ...\"\"\"\n\n summary = 'Install packages.'\n\n def __init__(self, *args, **kw):\n super(InstallCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n\n cmd_opts.add_option(cmdoptions.requirements())\n cmd_opts.add_option(cmdoptions.constraints())\n cmd_opts.add_option(cmdoptions.no_deps())\n cmd_opts.add_option(cmdoptions.pre())\n\n cmd_opts.add_option(cmdoptions.editable())\n cmd_opts.add_option(\n '-t', '--target',\n dest='target_dir',\n metavar='dir',\n default=None,\n 
help='Install packages into <dir>. '\n 'By default this will not replace existing files/folders in '\n '<dir>. Use --upgrade to replace existing packages in <dir> '\n 'with new versions.'\n )\n cmd_opts.add_option(\n '--user',\n dest='use_user_site',\n action='store_true',\n help=\"Install to the Python user install directory for your \"\n \"platform. Typically ~/.local/, or %APPDATA%\\\\Python on \"\n \"Windows. (See the Python documentation for site.USER_BASE \"\n \"for full details.)\")\n cmd_opts.add_option(\n '--root',\n dest='root_path',\n metavar='dir',\n default=None,\n help=\"Install everything relative to this alternate root \"\n \"directory.\")\n cmd_opts.add_option(\n '--prefix',\n dest='prefix_path',\n metavar='dir',\n default=None,\n help=\"Installation prefix where lib, bin and other top-level \"\n \"folders are placed\")\n\n cmd_opts.add_option(cmdoptions.build_dir())\n\n cmd_opts.add_option(cmdoptions.src())\n\n cmd_opts.add_option(\n '-U', '--upgrade',\n dest='upgrade',\n action='store_true',\n help='Upgrade all specified packages to the newest available '\n 'version. The handling of dependencies depends on the '\n 'upgrade-strategy used.'\n )\n\n cmd_opts.add_option(\n '--upgrade-strategy',\n dest='upgrade_strategy',\n default='eager',\n choices=['only-if-needed', 'eager'],\n help='Determines how dependency upgrading should be handled '\n '(default: %(default)s). '\n '\"eager\" - dependencies are upgraded regardless of '\n 'whether the currently installed version satisfies the '\n 'requirements of the upgraded package(s). '\n '\"only-if-needed\" - are upgraded only when they do not '\n 'satisfy the requirements of the upgraded package(s).'\n )\n\n cmd_opts.add_option(\n '--force-reinstall',\n dest='force_reinstall',\n action='store_true',\n help='When upgrading, reinstall all packages even if they are '\n 'already up-to-date.')\n\n cmd_opts.add_option(\n '-I', '--ignore-installed',\n dest='ignore_installed',\n action='store_true',\n help='Ignore the installed packages (reinstalling instead).')\n\n cmd_opts.add_option(cmdoptions.ignore_requires_python())\n\n cmd_opts.add_option(cmdoptions.install_options())\n cmd_opts.add_option(cmdoptions.global_options())\n\n cmd_opts.add_option(\n \"--compile\",\n action=\"store_true\",\n dest=\"compile\",\n default=True,\n help=\"Compile Python source files to bytecode\",\n )\n\n cmd_opts.add_option(\n \"--no-compile\",\n action=\"store_false\",\n dest=\"compile\",\n help=\"Do not compile Python source files to bytecode\",\n )\n\n cmd_opts.add_option(cmdoptions.no_binary())\n cmd_opts.add_option(cmdoptions.only_binary())\n cmd_opts.add_option(cmdoptions.no_clean())\n cmd_opts.add_option(cmdoptions.require_hashes())\n cmd_opts.add_option(cmdoptions.progress_bar())\n\n index_opts = cmdoptions.make_option_group(\n cmdoptions.index_group,\n self.parser,\n )\n\n self.parser.insert_option_group(0, index_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def run(self, options, args):\n cmdoptions.check_install_build_global(options)\n\n if options.build_dir:\n options.build_dir = os.path.abspath(options.build_dir)\n\n options.src_dir = os.path.abspath(options.src_dir)\n install_options = options.install_options or []\n if options.use_user_site:\n if options.prefix_path:\n raise CommandError(\n \"Can not combine '--user' and '--prefix' as they imply \"\n \"different installation locations\"\n )\n if virtualenv_no_global():\n raise InstallationError(\n \"Can not perform a '--user' install. 
User site-packages \"\n \"are not visible in this virtualenv.\"\n )\n install_options.append('--user')\n install_options.append('--prefix=')\n\n temp_target_dir = None\n if options.target_dir:\n options.ignore_installed = True\n temp_target_dir = tempfile.mkdtemp()\n options.target_dir = os.path.abspath(options.target_dir)\n if (os.path.exists(options.target_dir) and not\n os.path.isdir(options.target_dir)):\n raise CommandError(\n \"Target path exists but is not a directory, will not \"\n \"continue.\"\n )\n install_options.append('--home=' + temp_target_dir)\n\n global_options = options.global_options or []\n\n with self._build_session(options) as session:\n\n finder = self._build_package_finder(options, session)\n build_delete = (not (options.no_clean or options.build_dir))\n wheel_cache = WheelCache(options.cache_dir, options.format_control)\n if options.cache_dir and not check_path_owner(options.cache_dir):\n logger.warning(\n \"The directory '%s' or its parent directory is not owned \"\n \"by the current user and caching wheels has been \"\n \"disabled. check the permissions and owner of that \"\n \"directory. If executing pip with sudo, you may want \"\n \"sudo's -H flag.\",\n options.cache_dir,\n )\n options.cache_dir = None\n\n with BuildDirectory(options.build_dir,\n delete=build_delete) as build_dir:\n requirement_set = RequirementSet(\n build_dir=build_dir,\n src_dir=options.src_dir,\n upgrade=options.upgrade,\n upgrade_strategy=options.upgrade_strategy,\n ignore_installed=options.ignore_installed,\n ignore_dependencies=options.ignore_dependencies,\n ignore_requires_python=options.ignore_requires_python,\n force_reinstall=options.force_reinstall,\n use_user_site=options.use_user_site,\n target_dir=temp_target_dir,\n session=session,\n pycompile=options.compile,\n isolated=options.isolated_mode,\n wheel_cache=wheel_cache,\n require_hashes=options.require_hashes,\n progress_bar=options.progress_bar,\n )\n\n self.populate_requirement_set(\n requirement_set, args, options, finder, session, self.name,\n wheel_cache\n )\n\n try:\n if (not wheel or not options.cache_dir):\n # on -d don't do complex things like building\n # wheels, and don't try to build wheels when wheel is\n # not installed.\n requirement_set.prepare_files(finder)\n else:\n # build wheels before install.\n wb = WheelBuilder(\n requirement_set,\n finder,\n build_options=[],\n global_options=[],\n )\n # Ignore the result: a failed wheel will be\n # installed from the sdist/vcs whatever.\n wb.build(autobuilding=True)\n\n requirement_set.install(\n install_options,\n global_options,\n root=options.root_path,\n prefix=options.prefix_path,\n )\n\n possible_lib_locations = get_lib_location_guesses(\n user=options.use_user_site,\n home=temp_target_dir,\n root=options.root_path,\n prefix=options.prefix_path,\n isolated=options.isolated_mode,\n )\n reqs = sorted(\n requirement_set.successfully_installed,\n key=operator.attrgetter('name'))\n items = []\n for req in reqs:\n item = req.name\n try:\n installed_version = get_installed_version(\n req.name, possible_lib_locations\n )\n if installed_version:\n item += '-' + installed_version\n except Exception:\n pass\n items.append(item)\n installed = ' '.join(items)\n if installed:\n logger.info('Successfully installed %s', installed)\n except PreviousBuildDirError:\n options.no_clean = True\n raise\n finally:\n # Clean up\n if not options.no_clean:\n requirement_set.cleanup_files()\n\n if options.target_dir:\n ensure_dir(options.target_dir)\n\n # Checking both purelib and platlib 
directories for installed\n # packages to be moved to target directory\n lib_dir_list = []\n\n purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib']\n platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib']\n data_dir = distutils_scheme('', home=temp_target_dir)['data']\n\n if os.path.exists(purelib_dir):\n lib_dir_list.append(purelib_dir)\n if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:\n lib_dir_list.append(platlib_dir)\n if os.path.exists(data_dir):\n lib_dir_list.append(data_dir)\n\n for lib_dir in lib_dir_list:\n for item in os.listdir(lib_dir):\n if lib_dir == data_dir:\n ddir = os.path.join(data_dir, item)\n if any(s.startswith(ddir) for s in lib_dir_list[:-1]):\n continue\n target_item_dir = os.path.join(options.target_dir, item)\n if os.path.exists(target_item_dir):\n if not options.upgrade:\n logger.warning(\n 'Target directory %s already exists. Specify '\n '--upgrade to force replacement.',\n target_item_dir\n )\n continue\n if os.path.islink(target_item_dir):\n logger.warning(\n 'Target directory %s already exists and is '\n 'a link. Pip will not automatically replace '\n 'links, please remove if replacement is '\n 'desired.',\n target_item_dir\n )\n continue\n if os.path.isdir(target_item_dir):\n shutil.rmtree(target_item_dir)\n else:\n os.remove(target_item_dir)\n\n shutil.move(\n os.path.join(lib_dir, item),\n target_item_dir\n )\n shutil.rmtree(temp_target_dir)\n return requirement_set\n\n\ndef get_lib_location_guesses(*args, **kwargs):\n scheme = distutils_scheme('', *args, **kwargs)\n return [scheme['purelib'], scheme['platlib']]\n", "path": "pip/commands/install.py"}]} | 3,915 | 154 |
gh_patches_debug_17712 | rasdani/github-patches | git_diff | pypa__virtualenv-1509 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Integration with virtualenvwrapper
Congratulations on the rewrite.
I notice that `virtualenvwrapper` doesn't integrate with the new beta:
```
$ mkvirtualenv foo
ERROR: Environment '/Users/brettmz/.virtualenvs/foo' does not contain an activate script.
```
Sure enough - I believe it is looking for a script `activate` - but the script file now depends on the shell - `activate.sh` etc.
It would be good if this could work somehow - would it be reasonable to create / link a default `activate` script for this case? Or should virtualenvwrapper be updated?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/virtualenv/activation/bash/__init__.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 from virtualenv.util.path import Path
4
5 from ..via_template import ViaTemplateActivator
6
7
8 class BashActivator(ViaTemplateActivator):
9 @classmethod
10 def supports(cls, interpreter):
11 return interpreter.os != "nt"
12
13 def templates(self):
14 yield Path("activate.sh")
15
```
Path: `src/virtualenv/activation/via_template.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 import sys
5 from abc import ABCMeta, abstractmethod
6
7 import six
8
9 from .activator import Activator
10
11 if sys.version_info >= (3, 7):
12 from importlib.resources import read_text
13 else:
14 from importlib_resources import read_text
15
16
17 @six.add_metaclass(ABCMeta)
18 class ViaTemplateActivator(Activator):
19 @abstractmethod
20 def templates(self):
21 raise NotImplementedError
22
23 def generate(self, creator):
24 dest_folder = creator.bin_dir
25 replacements = self.replacements(creator, dest_folder)
26 self._generate(replacements, self.templates(), dest_folder, creator)
27 if self.flag_prompt is not None:
28 creator.pyenv_cfg["prompt"] = self.flag_prompt
29
30 def replacements(self, creator, dest_folder):
31 return {
32 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
33 "__VIRTUAL_ENV__": six.ensure_text(str(creator.dest)),
34 "__VIRTUAL_NAME__": creator.env_name,
35 "__BIN_NAME__": six.ensure_text(str(creator.bin_dir.relative_to(creator.dest))),
36 "__PATH_SEP__": six.ensure_text(os.pathsep),
37 }
38
39 def _generate(self, replacements, templates, to_folder, creator):
40 for template in templates:
41 text = self.instantiate_template(replacements, template, creator)
42 (to_folder / template).write_text(text, encoding="utf-8")
43
44 def instantiate_template(self, replacements, template, creator):
45 # read text and do replacements
46 text = read_text(self.__module__, str(template), encoding="utf-8", errors="strict")
47 for key, value in replacements.items():
48 value = self._repr_unicode(creator, value)
49 text = text.replace(key, value)
50 return text
51
52 @staticmethod
53 def _repr_unicode(creator, value):
54 # by default we just let it be unicode
55 return value
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/virtualenv/activation/bash/__init__.py b/src/virtualenv/activation/bash/__init__.py
--- a/src/virtualenv/activation/bash/__init__.py
+++ b/src/virtualenv/activation/bash/__init__.py
@@ -12,3 +12,6 @@
def templates(self):
yield Path("activate.sh")
+
+ def as_name(self, template):
+ return template.stem
diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py
--- a/src/virtualenv/activation/via_template.py
+++ b/src/virtualenv/activation/via_template.py
@@ -39,7 +39,11 @@
def _generate(self, replacements, templates, to_folder, creator):
for template in templates:
text = self.instantiate_template(replacements, template, creator)
- (to_folder / template).write_text(text, encoding="utf-8")
+ dest = to_folder / self.as_name(template)
+ dest.write_text(text, encoding="utf-8")
+
+ def as_name(self, template):
+ return template.name
def instantiate_template(self, replacements, template, creator):
# read text and do replacements
| {"golden_diff": "diff --git a/src/virtualenv/activation/bash/__init__.py b/src/virtualenv/activation/bash/__init__.py\n--- a/src/virtualenv/activation/bash/__init__.py\n+++ b/src/virtualenv/activation/bash/__init__.py\n@@ -12,3 +12,6 @@\n \n def templates(self):\n yield Path(\"activate.sh\")\n+\n+ def as_name(self, template):\n+ return template.stem\ndiff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -39,7 +39,11 @@\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n- (to_folder / template).write_text(text, encoding=\"utf-8\")\n+ dest = to_folder / self.as_name(template)\n+ dest.write_text(text, encoding=\"utf-8\")\n+\n+ def as_name(self, template):\n+ return template.name\n \n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n", "issue": "Integration with virtualenvwrapper\nCongratulations on the rewrite.\r\n\r\nI notice that `virtualenvwrapper` doesn't integrate with the new beta:\r\n\r\n```\r\n$ mkvirtualenv foo\r\nERROR: Environment '/Users/brettmz/.virtualenvs/foo' does not contain an activate script.\r\n```\r\n\r\nSure enough - I believe it is looking for a script `activate` - but the script file now depends on the shell - `activate.sh` etc. \r\n\r\nIt would be good if this could work somehow - would it be reasonable to create / link a default `activate` script for this case? Or should virtualenvwrapper be updated?\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom virtualenv.util.path import Path\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass BashActivator(ViaTemplateActivator):\n @classmethod\n def supports(cls, interpreter):\n return interpreter.os != \"nt\"\n\n def templates(self):\n yield Path(\"activate.sh\")\n", "path": "src/virtualenv/activation/bash/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nimport six\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\[email protected]_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": six.ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": six.ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": six.ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n (to_folder / template).write_text(text, encoding=\"utf-8\")\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = 
read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom virtualenv.util.path import Path\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass BashActivator(ViaTemplateActivator):\n @classmethod\n def supports(cls, interpreter):\n return interpreter.os != \"nt\"\n\n def templates(self):\n yield Path(\"activate.sh\")\n\n def as_name(self, template):\n return template.stem\n", "path": "src/virtualenv/activation/bash/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nimport six\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\[email protected]_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": six.ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": six.ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": six.ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]} | 1,036 | 276 |
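The crux of the patch above is that the bash activator now writes its rendered template under `Path.stem` (suffix stripped) rather than `Path.name`, so `activate.sh` lands on disk as plain `activate`, the filename virtualenvwrapper looks for. A small illustration of the difference, with a made-up destination folder:

```python
from pathlib import Path

template = Path("activate.sh")
print(template.name)  # 'activate.sh' -> filename used before the patch
print(template.stem)  # 'activate'    -> filename the bash activator now uses

# Hypothetical rendering step analogous to ViaTemplateActivator._generate:
dest_folder = Path("/tmp/venv/bin")
dest = dest_folder / template.stem   # /tmp/venv/bin/activate
print(dest)
```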
gh_patches_debug_30204 | rasdani/github-patches | git_diff | ipython__ipython-9854 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot close last figure window
After updating ipython to 5.0 (from macports), I cannot close the last figure window (neither with the button in the window bar, nor with 'close()'). The mouse cursor becomes a spinning wheel inside the window, but I can still type commands in the terminal window.
However, if I type clf(), the figure window closes and a new one appears.
This happens only when there is only one figure window remaining.
Here's my system (OS X El Capitan, with python/ipython etc. installed via macports) :
```
[~] $ python -c "import IPython; print(IPython.sys_info())"
{'commit_hash': u'<not found>',
'commit_source': '(none found)',
'default_encoding': 'UTF-8',
'ipython_path': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython',
'ipython_version': '5.0.0',
'os_name': 'posix',
'platform': 'Darwin-15.5.0-x86_64-i386-64bit',
'sys_executable': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python',
'sys_platform': 'darwin',
'sys_version': '2.7.12 (default, Jun 29 2016, 12:52:38) \n[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)]'}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/terminal/pt_inputhooks/osx.py`
Content:
```
1 """Inputhook for OS X
2
3 Calls NSApp / CoreFoundation APIs via ctypes.
4 """
5
6 # obj-c boilerplate from appnope, used under BSD 2-clause
7
8 import ctypes
9 import ctypes.util
10
11 objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))
12
13 void_p = ctypes.c_void_p
14
15 objc.objc_getClass.restype = void_p
16 objc.sel_registerName.restype = void_p
17 objc.objc_msgSend.restype = void_p
18 objc.objc_msgSend.argtypes = [void_p, void_p]
19
20 msg = objc.objc_msgSend
21
22 def _utf8(s):
23 """ensure utf8 bytes"""
24 if not isinstance(s, bytes):
25 s = s.encode('utf8')
26 return s
27
28 def n(name):
29 """create a selector name (for ObjC methods)"""
30 return objc.sel_registerName(_utf8(name))
31
32 def C(classname):
33 """get an ObjC Class by name"""
34 return objc.objc_getClass(_utf8(classname))
35
36 # end obj-c boilerplate from appnope
37
38 # CoreFoundation C-API calls we will use:
39 CoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))
40
41 CFFileDescriptorCreate = CoreFoundation.CFFileDescriptorCreate
42 CFFileDescriptorCreate.restype = void_p
43 CFFileDescriptorCreate.argtypes = [void_p, ctypes.c_int, ctypes.c_bool, void_p]
44
45 CFFileDescriptorGetNativeDescriptor = CoreFoundation.CFFileDescriptorGetNativeDescriptor
46 CFFileDescriptorGetNativeDescriptor.restype = ctypes.c_int
47 CFFileDescriptorGetNativeDescriptor.argtypes = [void_p]
48
49 CFFileDescriptorEnableCallBacks = CoreFoundation.CFFileDescriptorEnableCallBacks
50 CFFileDescriptorEnableCallBacks.restype = None
51 CFFileDescriptorEnableCallBacks.argtypes = [void_p, ctypes.c_ulong]
52
53 CFFileDescriptorCreateRunLoopSource = CoreFoundation.CFFileDescriptorCreateRunLoopSource
54 CFFileDescriptorCreateRunLoopSource.restype = void_p
55 CFFileDescriptorCreateRunLoopSource.argtypes = [void_p, void_p, void_p]
56
57 CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent
58 CFRunLoopGetCurrent.restype = void_p
59
60 CFRunLoopAddSource = CoreFoundation.CFRunLoopAddSource
61 CFRunLoopAddSource.restype = None
62 CFRunLoopAddSource.argtypes = [void_p, void_p, void_p]
63
64 CFRelease = CoreFoundation.CFRelease
65 CFRelease.restype = None
66 CFRelease.argtypes = [void_p]
67
68 CFFileDescriptorInvalidate = CoreFoundation.CFFileDescriptorInvalidate
69 CFFileDescriptorInvalidate.restype = None
70 CFFileDescriptorInvalidate.argtypes = [void_p]
71
72 # From CFFileDescriptor.h
73 kCFFileDescriptorReadCallBack = 1
74 kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes')
75
76
77 def _NSApp():
78 """Return the global NSApplication instance (NSApp)"""
79 return msg(C('NSApplication'), n('sharedApplication'))
80
81
82 def _wake(NSApp):
83 """Wake the Application"""
84 event = msg(C('NSEvent'),
85 n('otherEventWithType:location:modifierFlags:'
86 'timestamp:windowNumber:context:subtype:data1:data2:'),
87 15, # Type
88 0, # location
89 0, # flags
90 0, # timestamp
91 0, # window
92 None, # context
93 0, # subtype
94 0, # data1
95 0, # data2
96 )
97 msg(NSApp, n('postEvent:atStart:'), void_p(event), True)
98
99
100 def _input_callback(fdref, flags, info):
101 """Callback to fire when there's input to be read"""
102 CFFileDescriptorInvalidate(fdref)
103 CFRelease(fdref)
104 NSApp = _NSApp()
105 msg(NSApp, n('stop:'), NSApp)
106 _wake(NSApp)
107
108 _c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p, void_p)
109 _c_input_callback = _c_callback_func_type(_input_callback)
110
111
112 def _stop_on_read(fd):
113 """Register callback to stop eventloop when there's data on fd"""
114 fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)
115 CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)
116 source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)
117 loop = CFRunLoopGetCurrent()
118 CFRunLoopAddSource(loop, source, kCFRunLoopCommonModes)
119 CFRelease(source)
120
121
122 def inputhook(context):
123 """Inputhook for Cocoa (NSApp)"""
124 NSApp = _NSApp()
125 window_count = msg(
126 msg(NSApp, n('windows')),
127 n('count')
128 )
129 if not window_count:
130 return
131 _stop_on_read(context.fileno())
132 msg(NSApp, n('run'))
133
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/terminal/pt_inputhooks/osx.py b/IPython/terminal/pt_inputhooks/osx.py
--- a/IPython/terminal/pt_inputhooks/osx.py
+++ b/IPython/terminal/pt_inputhooks/osx.py
@@ -7,6 +7,7 @@
import ctypes
import ctypes.util
+from threading import Event
objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))
@@ -97,8 +98,11 @@
msg(NSApp, n('postEvent:atStart:'), void_p(event), True)
+_triggered = Event()
+
def _input_callback(fdref, flags, info):
"""Callback to fire when there's input to be read"""
+ _triggered.set()
CFFileDescriptorInvalidate(fdref)
CFRelease(fdref)
NSApp = _NSApp()
@@ -111,6 +115,7 @@
def _stop_on_read(fd):
"""Register callback to stop eventloop when there's data on fd"""
+ _triggered.clear()
fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)
CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)
source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)
@@ -130,4 +135,9 @@
return
_stop_on_read(context.fileno())
msg(NSApp, n('run'))
-
+ if not _triggered.is_set():
+ # app closed without firing callback,
+ # probably due to last window being closed.
+ # Run the loop manually in this case,
+ # since there may be events still to process (#9734)
+ CoreFoundation.CFRunLoopRun()
| {"golden_diff": "diff --git a/IPython/terminal/pt_inputhooks/osx.py b/IPython/terminal/pt_inputhooks/osx.py\n--- a/IPython/terminal/pt_inputhooks/osx.py\n+++ b/IPython/terminal/pt_inputhooks/osx.py\n@@ -7,6 +7,7 @@\n \n import ctypes\n import ctypes.util\n+from threading import Event\n \n objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))\n \n@@ -97,8 +98,11 @@\n msg(NSApp, n('postEvent:atStart:'), void_p(event), True)\n \n \n+_triggered = Event()\n+\n def _input_callback(fdref, flags, info):\n \"\"\"Callback to fire when there's input to be read\"\"\"\n+ _triggered.set()\n CFFileDescriptorInvalidate(fdref)\n CFRelease(fdref)\n NSApp = _NSApp()\n@@ -111,6 +115,7 @@\n \n def _stop_on_read(fd):\n \"\"\"Register callback to stop eventloop when there's data on fd\"\"\"\n+ _triggered.clear()\n fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)\n CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)\n source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)\n@@ -130,4 +135,9 @@\n return\n _stop_on_read(context.fileno())\n msg(NSApp, n('run'))\n-\n+ if not _triggered.is_set():\n+ # app closed without firing callback,\n+ # probably due to last window being closed.\n+ # Run the loop manually in this case,\n+ # since there may be events still to process (#9734)\n+ CoreFoundation.CFRunLoopRun()\n", "issue": "Cannot close last figure window\nAfter updating ipython to 5.0 (from macports), I cannot close last figure window (neither with button in window bar, nor with 'close()'). The mouse cursor becomes spinning wheel inside the window, but I can still type command in the terminal window.\n\nHowever, if I type clf(), the figure window closes and a new one appears.\n\nThis happens only when there is only one figure window remaining. \n\nHere's my system (OS X El Capitan, with python/ipython etc. 
installed via macports) :\n\n```\n[~] $ python -c \"import IPython; print(IPython.sys_info())\"\n{'commit_hash': u'<not found>',\n 'commit_source': '(none found)',\n 'default_encoding': 'UTF-8',\n 'ipython_path': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython',\n 'ipython_version': '5.0.0',\n 'os_name': 'posix',\n 'platform': 'Darwin-15.5.0-x86_64-i386-64bit',\n 'sys_executable': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python',\n 'sys_platform': 'darwin',\n 'sys_version': '2.7.12 (default, Jun 29 2016, 12:52:38) \\n[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)]'}\n```\n\n", "before_files": [{"content": "\"\"\"Inputhook for OS X\n\nCalls NSApp / CoreFoundation APIs via ctypes.\n\"\"\"\n\n# obj-c boilerplate from appnope, used under BSD 2-clause\n\nimport ctypes\nimport ctypes.util\n\nobjc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))\n\nvoid_p = ctypes.c_void_p\n\nobjc.objc_getClass.restype = void_p\nobjc.sel_registerName.restype = void_p\nobjc.objc_msgSend.restype = void_p\nobjc.objc_msgSend.argtypes = [void_p, void_p]\n\nmsg = objc.objc_msgSend\n\ndef _utf8(s):\n \"\"\"ensure utf8 bytes\"\"\"\n if not isinstance(s, bytes):\n s = s.encode('utf8')\n return s\n\ndef n(name):\n \"\"\"create a selector name (for ObjC methods)\"\"\"\n return objc.sel_registerName(_utf8(name))\n\ndef C(classname):\n \"\"\"get an ObjC Class by name\"\"\"\n return objc.objc_getClass(_utf8(classname))\n\n# end obj-c boilerplate from appnope\n\n# CoreFoundation C-API calls we will use:\nCoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))\n\nCFFileDescriptorCreate = CoreFoundation.CFFileDescriptorCreate\nCFFileDescriptorCreate.restype = void_p\nCFFileDescriptorCreate.argtypes = [void_p, ctypes.c_int, ctypes.c_bool, void_p]\n\nCFFileDescriptorGetNativeDescriptor = CoreFoundation.CFFileDescriptorGetNativeDescriptor\nCFFileDescriptorGetNativeDescriptor.restype = ctypes.c_int\nCFFileDescriptorGetNativeDescriptor.argtypes = [void_p]\n\nCFFileDescriptorEnableCallBacks = CoreFoundation.CFFileDescriptorEnableCallBacks\nCFFileDescriptorEnableCallBacks.restype = None\nCFFileDescriptorEnableCallBacks.argtypes = [void_p, ctypes.c_ulong]\n\nCFFileDescriptorCreateRunLoopSource = CoreFoundation.CFFileDescriptorCreateRunLoopSource\nCFFileDescriptorCreateRunLoopSource.restype = void_p\nCFFileDescriptorCreateRunLoopSource.argtypes = [void_p, void_p, void_p]\n\nCFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent\nCFRunLoopGetCurrent.restype = void_p\n\nCFRunLoopAddSource = CoreFoundation.CFRunLoopAddSource\nCFRunLoopAddSource.restype = None\nCFRunLoopAddSource.argtypes = [void_p, void_p, void_p]\n\nCFRelease = CoreFoundation.CFRelease\nCFRelease.restype = None\nCFRelease.argtypes = [void_p]\n\nCFFileDescriptorInvalidate = CoreFoundation.CFFileDescriptorInvalidate\nCFFileDescriptorInvalidate.restype = None\nCFFileDescriptorInvalidate.argtypes = [void_p]\n\n# From CFFileDescriptor.h\nkCFFileDescriptorReadCallBack = 1\nkCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes')\n\n\ndef _NSApp():\n \"\"\"Return the global NSApplication instance (NSApp)\"\"\"\n return msg(C('NSApplication'), n('sharedApplication'))\n\n\ndef _wake(NSApp):\n \"\"\"Wake the Application\"\"\"\n event = msg(C('NSEvent'),\n n('otherEventWithType:location:modifierFlags:'\n 'timestamp:windowNumber:context:subtype:data1:data2:'),\n 15, # Type\n 0, # location\n 0, # flags\n 0, # 
timestamp\n 0, # window\n None, # context\n 0, # subtype\n 0, # data1\n 0, # data2\n )\n msg(NSApp, n('postEvent:atStart:'), void_p(event), True)\n\n\ndef _input_callback(fdref, flags, info):\n \"\"\"Callback to fire when there's input to be read\"\"\"\n CFFileDescriptorInvalidate(fdref)\n CFRelease(fdref)\n NSApp = _NSApp()\n msg(NSApp, n('stop:'), NSApp)\n _wake(NSApp)\n\n_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p, void_p)\n_c_input_callback = _c_callback_func_type(_input_callback)\n\n\ndef _stop_on_read(fd):\n \"\"\"Register callback to stop eventloop when there's data on fd\"\"\"\n fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)\n CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)\n source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)\n loop = CFRunLoopGetCurrent()\n CFRunLoopAddSource(loop, source, kCFRunLoopCommonModes)\n CFRelease(source)\n\n\ndef inputhook(context):\n \"\"\"Inputhook for Cocoa (NSApp)\"\"\"\n NSApp = _NSApp()\n window_count = msg(\n msg(NSApp, n('windows')),\n n('count')\n )\n if not window_count:\n return\n _stop_on_read(context.fileno())\n msg(NSApp, n('run'))\n\n", "path": "IPython/terminal/pt_inputhooks/osx.py"}], "after_files": [{"content": "\"\"\"Inputhook for OS X\n\nCalls NSApp / CoreFoundation APIs via ctypes.\n\"\"\"\n\n# obj-c boilerplate from appnope, used under BSD 2-clause\n\nimport ctypes\nimport ctypes.util\nfrom threading import Event\n\nobjc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))\n\nvoid_p = ctypes.c_void_p\n\nobjc.objc_getClass.restype = void_p\nobjc.sel_registerName.restype = void_p\nobjc.objc_msgSend.restype = void_p\nobjc.objc_msgSend.argtypes = [void_p, void_p]\n\nmsg = objc.objc_msgSend\n\ndef _utf8(s):\n \"\"\"ensure utf8 bytes\"\"\"\n if not isinstance(s, bytes):\n s = s.encode('utf8')\n return s\n\ndef n(name):\n \"\"\"create a selector name (for ObjC methods)\"\"\"\n return objc.sel_registerName(_utf8(name))\n\ndef C(classname):\n \"\"\"get an ObjC Class by name\"\"\"\n return objc.objc_getClass(_utf8(classname))\n\n# end obj-c boilerplate from appnope\n\n# CoreFoundation C-API calls we will use:\nCoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))\n\nCFFileDescriptorCreate = CoreFoundation.CFFileDescriptorCreate\nCFFileDescriptorCreate.restype = void_p\nCFFileDescriptorCreate.argtypes = [void_p, ctypes.c_int, ctypes.c_bool, void_p]\n\nCFFileDescriptorGetNativeDescriptor = CoreFoundation.CFFileDescriptorGetNativeDescriptor\nCFFileDescriptorGetNativeDescriptor.restype = ctypes.c_int\nCFFileDescriptorGetNativeDescriptor.argtypes = [void_p]\n\nCFFileDescriptorEnableCallBacks = CoreFoundation.CFFileDescriptorEnableCallBacks\nCFFileDescriptorEnableCallBacks.restype = None\nCFFileDescriptorEnableCallBacks.argtypes = [void_p, ctypes.c_ulong]\n\nCFFileDescriptorCreateRunLoopSource = CoreFoundation.CFFileDescriptorCreateRunLoopSource\nCFFileDescriptorCreateRunLoopSource.restype = void_p\nCFFileDescriptorCreateRunLoopSource.argtypes = [void_p, void_p, void_p]\n\nCFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent\nCFRunLoopGetCurrent.restype = void_p\n\nCFRunLoopAddSource = CoreFoundation.CFRunLoopAddSource\nCFRunLoopAddSource.restype = None\nCFRunLoopAddSource.argtypes = [void_p, void_p, void_p]\n\nCFRelease = CoreFoundation.CFRelease\nCFRelease.restype = None\nCFRelease.argtypes = [void_p]\n\nCFFileDescriptorInvalidate = CoreFoundation.CFFileDescriptorInvalidate\nCFFileDescriptorInvalidate.restype = 
None\nCFFileDescriptorInvalidate.argtypes = [void_p]\n\n# From CFFileDescriptor.h\nkCFFileDescriptorReadCallBack = 1\nkCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes')\n\n\ndef _NSApp():\n \"\"\"Return the global NSApplication instance (NSApp)\"\"\"\n return msg(C('NSApplication'), n('sharedApplication'))\n\n\ndef _wake(NSApp):\n \"\"\"Wake the Application\"\"\"\n event = msg(C('NSEvent'),\n n('otherEventWithType:location:modifierFlags:'\n 'timestamp:windowNumber:context:subtype:data1:data2:'),\n 15, # Type\n 0, # location\n 0, # flags\n 0, # timestamp\n 0, # window\n None, # context\n 0, # subtype\n 0, # data1\n 0, # data2\n )\n msg(NSApp, n('postEvent:atStart:'), void_p(event), True)\n\n\n_triggered = Event()\n\ndef _input_callback(fdref, flags, info):\n \"\"\"Callback to fire when there's input to be read\"\"\"\n _triggered.set()\n CFFileDescriptorInvalidate(fdref)\n CFRelease(fdref)\n NSApp = _NSApp()\n msg(NSApp, n('stop:'), NSApp)\n _wake(NSApp)\n\n_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p, void_p)\n_c_input_callback = _c_callback_func_type(_input_callback)\n\n\ndef _stop_on_read(fd):\n \"\"\"Register callback to stop eventloop when there's data on fd\"\"\"\n _triggered.clear()\n fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)\n CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)\n source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)\n loop = CFRunLoopGetCurrent()\n CFRunLoopAddSource(loop, source, kCFRunLoopCommonModes)\n CFRelease(source)\n\n\ndef inputhook(context):\n \"\"\"Inputhook for Cocoa (NSApp)\"\"\"\n NSApp = _NSApp()\n window_count = msg(\n msg(NSApp, n('windows')),\n n('count')\n )\n if not window_count:\n return\n _stop_on_read(context.fileno())\n msg(NSApp, n('run'))\n if not _triggered.is_set():\n # app closed without firing callback,\n # probably due to last window being closed.\n # Run the loop manually in this case,\n # since there may be events still to process (#9734)\n CoreFoundation.CFRunLoopRun()\n", "path": "IPython/terminal/pt_inputhooks/osx.py"}]} | 1,973 | 393 |
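The fix above hinges on a `threading.Event` that records whether the file-descriptor callback ever fired; if the Cocoa application stopped for another reason (such as the last window closing), the event is still clear and the run loop is pumped once more so queued events get processed. Stripped of the ctypes/Cocoa details, the pattern looks roughly like this, with `run_app` and `drain_pending_events` as hypothetical stand-ins for `NSApp run` and `CFRunLoopRun`:

```python
from threading import Event

_triggered = Event()


def _input_callback():
    # Called when input arrives on the prompt's file descriptor.
    _triggered.set()


def run_event_loop(run_app, drain_pending_events):
    _triggered.clear()
    run_app()  # may return because input arrived, or because a window closed
    if not _triggered.is_set():
        # The app stopped without our callback firing, so finish processing
        # whatever events are still queued (the situation in issue #9734).
        drain_pending_events()
```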
gh_patches_debug_40491 | rasdani/github-patches | git_diff | CTFd__CTFd-298 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Advice on E-mail Mechanism
It seems that when the E-mail server and E-mail confirmation are enabled, CTFd will send a new E-mail to the address entered by a new user every time he or she logs in while still unconfirmed (at least my SMTP server behaves like that; I apologize if it is a misconfiguration of my own server). I think it would be better if only one E-mail were sent the first time a new user signs up, and to provide a button allowing the unconfirmed user to request a new confirmation E-mail if needed.
Anyway, thanks for your excellent project!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/auth.py`
Content:
```
1 import logging
2 import os
3 import re
4 import time
5 import urllib
6
7 from flask import current_app as app, render_template, request, redirect, url_for, session, Blueprint
8 from itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature
9 from passlib.hash import bcrypt_sha256
10
11 from CTFd.models import db, Teams
12 from CTFd import utils
13
14 auth = Blueprint('auth', __name__)
15
16
17 @auth.route('/confirm', methods=['POST', 'GET'])
18 @auth.route('/confirm/<data>', methods=['GET'])
19 def confirm_user(data=None):
20 if not utils.get_config('verify_emails'):
21 return redirect(url_for('challenges.challenges_view'))
22 if data and request.method == "GET": # User is confirming email account
23 try:
24 s = Signer(app.config['SECRET_KEY'])
25 email = s.unsign(urllib.unquote_plus(data.decode('base64')))
26 except BadSignature:
27 return render_template('confirm.html', errors=['Your confirmation link seems wrong'])
28 except:
29 return render_template('confirm.html', errors=['Your link appears broken, please try again.'])
30 team = Teams.query.filter_by(email=email).first_or_404()
31 team.verified = True
32 db.session.commit()
33 logger = logging.getLogger('regs')
34 logger.warn("[{0}] {1} confirmed {2}".format(time.strftime("%m/%d/%Y %X"), team.name.encode('utf-8'), team.email.encode('utf-8')))
35 db.session.close()
36 if utils.authed():
37 return redirect(url_for('challenges.challenges_view'))
38 return redirect(url_for('auth.login'))
39 if not data and request.method == "GET": # User has been directed to the confirm page because his account is not verified
40 if not utils.authed():
41 return redirect(url_for('auth.login'))
42 team = Teams.query.filter_by(id=session['id']).first_or_404()
43 if team.verified:
44 return redirect(url_for('views.profile'))
45 else:
46 utils.verify_email(team.email)
47 return render_template('confirm.html', team=team)
48
49
50 @auth.route('/reset_password', methods=['POST', 'GET'])
51 @auth.route('/reset_password/<data>', methods=['POST', 'GET'])
52 def reset_password(data=None):
53 if data is not None and request.method == "GET":
54 return render_template('reset_password.html', mode='set')
55 if data is not None and request.method == "POST":
56 try:
57 s = TimedSerializer(app.config['SECRET_KEY'])
58 name = s.loads(urllib.unquote_plus(data.decode('base64')), max_age=1800)
59 except BadTimeSignature:
60 return render_template('reset_password.html', errors=['Your link has expired'])
61 except:
62 return render_template('reset_password.html', errors=['Your link appears broken, please try again.'])
63 team = Teams.query.filter_by(name=name).first_or_404()
64 team.password = bcrypt_sha256.encrypt(request.form['password'].strip())
65 db.session.commit()
66 db.session.close()
67 return redirect(url_for('auth.login'))
68
69 if request.method == 'POST':
70 email = request.form['email'].strip()
71 team = Teams.query.filter_by(email=email).first()
72 if not team:
73 return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])
74 s = TimedSerializer(app.config['SECRET_KEY'])
75 token = s.dumps(team.name)
76 text = """
77 Did you initiate a password reset?
78
79 {0}/{1}
80
81 """.format(url_for('auth.reset_password', _external=True), urllib.quote_plus(token.encode('base64')))
82
83 utils.sendmail(email, text)
84
85 return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])
86 return render_template('reset_password.html')
87
88
89 @auth.route('/register', methods=['POST', 'GET'])
90 def register():
91 if not utils.can_register():
92 return redirect(url_for('auth.login'))
93 if request.method == 'POST':
94 errors = []
95 name = request.form['name']
96 email = request.form['email']
97 password = request.form['password']
98
99 name_len = len(name) == 0
100 names = Teams.query.add_columns('name', 'id').filter_by(name=name).first()
101 emails = Teams.query.add_columns('email', 'id').filter_by(email=email).first()
102 pass_short = len(password) == 0
103 pass_long = len(password) > 128
104 valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", request.form['email'])
105
106 if not valid_email:
107 errors.append("That email doesn't look right")
108 if names:
109 errors.append('That team name is already taken')
110 if emails:
111 errors.append('That email has already been used')
112 if pass_short:
113 errors.append('Pick a longer password')
114 if pass_long:
115 errors.append('Pick a shorter password')
116 if name_len:
117 errors.append('Pick a longer team name')
118
119 if len(errors) > 0:
120 return render_template('register.html', errors=errors, name=request.form['name'], email=request.form['email'], password=request.form['password'])
121 else:
122 with app.app_context():
123 team = Teams(name, email.lower(), password)
124 db.session.add(team)
125 db.session.commit()
126 db.session.flush()
127
128 session['username'] = team.name
129 session['id'] = team.id
130 session['admin'] = team.admin
131 session['nonce'] = utils.sha512(os.urandom(10))
132
133 if utils.can_send_mail() and utils.get_config('verify_emails'): # Confirming users is enabled and we can send email.
134 db.session.close()
135 logger = logging.getLogger('regs')
136 logger.warn("[{0}] {1} registered (UNCONFIRMED) with {2}".format(time.strftime("%m/%d/%Y %X"),
137 request.form['name'].encode('utf-8'),
138 request.form['email'].encode('utf-8')))
139 return redirect(url_for('auth.confirm_user'))
140 else: # Don't care about confirming users
141 if utils.can_send_mail(): # We want to notify the user that they have registered.
142 utils.sendmail(request.form['email'], "You've successfully registered for {}".format(utils.get_config('ctf_name')))
143
144 db.session.close()
145
146 logger = logging.getLogger('regs')
147 logger.warn("[{0}] {1} registered with {2}".format(time.strftime("%m/%d/%Y %X"), request.form['name'].encode('utf-8'), request.form['email'].encode('utf-8')))
148 return redirect(url_for('challenges.challenges_view'))
149 else:
150 return render_template('register.html')
151
152
153 @auth.route('/login', methods=['POST', 'GET'])
154 def login():
155 if request.method == 'POST':
156 errors = []
157 name = request.form['name']
158 team = Teams.query.filter_by(name=name).first()
159 if team:
160 if team and bcrypt_sha256.verify(request.form['password'], team.password):
161 try:
162 session.regenerate() # NO SESSION FIXATION FOR YOU
163 except:
164 pass # TODO: Some session objects don't implement regenerate :(
165 session['username'] = team.name
166 session['id'] = team.id
167 session['admin'] = team.admin
168 session['nonce'] = utils.sha512(os.urandom(10))
169 db.session.close()
170
171 logger = logging.getLogger('logins')
172 logger.warn("[{0}] {1} logged in".format(time.strftime("%m/%d/%Y %X"), session['username'].encode('utf-8')))
173
174 if request.args.get('next') and utils.is_safe_url(request.args.get('next')):
175 return redirect(request.args.get('next'))
176 return redirect(url_for('challenges.challenges_view'))
177 else: # This user exists but the password is wrong
178 errors.append("Your username or password is incorrect")
179 db.session.close()
180 return render_template('login.html', errors=errors)
181 else: # This user just doesn't exist
182 errors.append("Your username or password is incorrect")
183 db.session.close()
184 return render_template('login.html', errors=errors)
185 else:
186 db.session.close()
187 return render_template('login.html')
188
189
190 @auth.route('/logout')
191 def logout():
192 if utils.authed():
193 session.clear()
194 return redirect(url_for('views.static_html'))
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/auth.py b/CTFd/auth.py
--- a/CTFd/auth.py
+++ b/CTFd/auth.py
@@ -18,8 +18,11 @@
@auth.route('/confirm/<data>', methods=['GET'])
def confirm_user(data=None):
if not utils.get_config('verify_emails'):
+ # If the CTF doesn't care about confirming email addresses then redierct to challenges
return redirect(url_for('challenges.challenges_view'))
- if data and request.method == "GET": # User is confirming email account
+
+ # User is confirming email account
+ if data and request.method == "GET":
try:
s = Signer(app.config['SECRET_KEY'])
email = s.unsign(urllib.unquote_plus(data.decode('base64')))
@@ -36,15 +39,28 @@
if utils.authed():
return redirect(url_for('challenges.challenges_view'))
return redirect(url_for('auth.login'))
- if not data and request.method == "GET": # User has been directed to the confirm page because his account is not verified
- if not utils.authed():
- return redirect(url_for('auth.login'))
- team = Teams.query.filter_by(id=session['id']).first_or_404()
- if team.verified:
- return redirect(url_for('views.profile'))
- else:
- utils.verify_email(team.email)
- return render_template('confirm.html', team=team)
+
+ # User is trying to start or restart the confirmation flow
+ if not utils.authed():
+ return redirect(url_for('auth.login'))
+
+ team = Teams.query.filter_by(id=session['id']).first_or_404()
+
+ if data is None:
+ if request.method == "POST":
+ # User wants to resend their confirmation email
+ if team.verified:
+ return redirect(url_for('views.profile'))
+ else:
+ utils.verify_email(team.email)
+ return render_template('confirm.html', team=team, infos=['Your confirmation email has been resent!'])
+ elif request.method == "GET":
+ # User has been directed to the confirm page
+ team = Teams.query.filter_by(id=session['id']).first_or_404()
+ if team.verified:
+ # If user is already verified, redirect to their profile
+ return redirect(url_for('views.profile'))
+ return render_template('confirm.html', team=team)
@auth.route('/reset_password', methods=['POST', 'GET'])
@@ -136,6 +152,9 @@
logger.warn("[{0}] {1} registered (UNCONFIRMED) with {2}".format(time.strftime("%m/%d/%Y %X"),
request.form['name'].encode('utf-8'),
request.form['email'].encode('utf-8')))
+
+ utils.verify_email(team.email)
+
return redirect(url_for('auth.confirm_user'))
else: # Don't care about confirming users
if utils.can_send_mail(): # We want to notify the user that they have registered.
| {"golden_diff": "diff --git a/CTFd/auth.py b/CTFd/auth.py\n--- a/CTFd/auth.py\n+++ b/CTFd/auth.py\n@@ -18,8 +18,11 @@\n @auth.route('/confirm/<data>', methods=['GET'])\n def confirm_user(data=None):\n if not utils.get_config('verify_emails'):\n+ # If the CTF doesn't care about confirming email addresses then redierct to challenges\n return redirect(url_for('challenges.challenges_view'))\n- if data and request.method == \"GET\": # User is confirming email account\n+\n+ # User is confirming email account\n+ if data and request.method == \"GET\":\n try:\n s = Signer(app.config['SECRET_KEY'])\n email = s.unsign(urllib.unquote_plus(data.decode('base64')))\n@@ -36,15 +39,28 @@\n if utils.authed():\n return redirect(url_for('challenges.challenges_view'))\n return redirect(url_for('auth.login'))\n- if not data and request.method == \"GET\": # User has been directed to the confirm page because his account is not verified\n- if not utils.authed():\n- return redirect(url_for('auth.login'))\n- team = Teams.query.filter_by(id=session['id']).first_or_404()\n- if team.verified:\n- return redirect(url_for('views.profile'))\n- else:\n- utils.verify_email(team.email)\n- return render_template('confirm.html', team=team)\n+\n+ # User is trying to start or restart the confirmation flow\n+ if not utils.authed():\n+ return redirect(url_for('auth.login'))\n+\n+ team = Teams.query.filter_by(id=session['id']).first_or_404()\n+\n+ if data is None:\n+ if request.method == \"POST\":\n+ # User wants to resend their confirmation email\n+ if team.verified:\n+ return redirect(url_for('views.profile'))\n+ else:\n+ utils.verify_email(team.email)\n+ return render_template('confirm.html', team=team, infos=['Your confirmation email has been resent!'])\n+ elif request.method == \"GET\":\n+ # User has been directed to the confirm page\n+ team = Teams.query.filter_by(id=session['id']).first_or_404()\n+ if team.verified:\n+ # If user is already verified, redirect to their profile\n+ return redirect(url_for('views.profile'))\n+ return render_template('confirm.html', team=team)\n \n \n @auth.route('/reset_password', methods=['POST', 'GET'])\n@@ -136,6 +152,9 @@\n logger.warn(\"[{0}] {1} registered (UNCONFIRMED) with {2}\".format(time.strftime(\"%m/%d/%Y %X\"),\n request.form['name'].encode('utf-8'),\n request.form['email'].encode('utf-8')))\n+\n+ utils.verify_email(team.email)\n+\n return redirect(url_for('auth.confirm_user'))\n else: # Don't care about confirming users\n if utils.can_send_mail(): # We want to notify the user that they have registered.\n", "issue": "Advice on E-mail Mechanism\nIt seems that when E-mail server and E-mail confirmation are enabled, CTFd will send a new E-mail to the E-mail address input by the new user every time he or she logged in without confirmation yet (At least my SMTP server does like that, I will be apologize if it is my own wrong configuration with my own server). 
I think it will be better that only one E-mail will be sent at the first time new user signs up, and provide a button allowing the unconfirmed user to receive a new confirmation E-mail if needed.\r\n\r\nAnyway, Thanks for your excellent project!\n", "before_files": [{"content": "import logging\nimport os\nimport re\nimport time\nimport urllib\n\nfrom flask import current_app as app, render_template, request, redirect, url_for, session, Blueprint\nfrom itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature\nfrom passlib.hash import bcrypt_sha256\n\nfrom CTFd.models import db, Teams\nfrom CTFd import utils\n\nauth = Blueprint('auth', __name__)\n\n\[email protected]('/confirm', methods=['POST', 'GET'])\[email protected]('/confirm/<data>', methods=['GET'])\ndef confirm_user(data=None):\n if not utils.get_config('verify_emails'):\n return redirect(url_for('challenges.challenges_view'))\n if data and request.method == \"GET\": # User is confirming email account\n try:\n s = Signer(app.config['SECRET_KEY'])\n email = s.unsign(urllib.unquote_plus(data.decode('base64')))\n except BadSignature:\n return render_template('confirm.html', errors=['Your confirmation link seems wrong'])\n except:\n return render_template('confirm.html', errors=['Your link appears broken, please try again.'])\n team = Teams.query.filter_by(email=email).first_or_404()\n team.verified = True\n db.session.commit()\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} confirmed {2}\".format(time.strftime(\"%m/%d/%Y %X\"), team.name.encode('utf-8'), team.email.encode('utf-8')))\n db.session.close()\n if utils.authed():\n return redirect(url_for('challenges.challenges_view'))\n return redirect(url_for('auth.login'))\n if not data and request.method == \"GET\": # User has been directed to the confirm page because his account is not verified\n if not utils.authed():\n return redirect(url_for('auth.login'))\n team = Teams.query.filter_by(id=session['id']).first_or_404()\n if team.verified:\n return redirect(url_for('views.profile'))\n else:\n utils.verify_email(team.email)\n return render_template('confirm.html', team=team)\n\n\[email protected]('/reset_password', methods=['POST', 'GET'])\[email protected]('/reset_password/<data>', methods=['POST', 'GET'])\ndef reset_password(data=None):\n if data is not None and request.method == \"GET\":\n return render_template('reset_password.html', mode='set')\n if data is not None and request.method == \"POST\":\n try:\n s = TimedSerializer(app.config['SECRET_KEY'])\n name = s.loads(urllib.unquote_plus(data.decode('base64')), max_age=1800)\n except BadTimeSignature:\n return render_template('reset_password.html', errors=['Your link has expired'])\n except:\n return render_template('reset_password.html', errors=['Your link appears broken, please try again.'])\n team = Teams.query.filter_by(name=name).first_or_404()\n team.password = bcrypt_sha256.encrypt(request.form['password'].strip())\n db.session.commit()\n db.session.close()\n return redirect(url_for('auth.login'))\n\n if request.method == 'POST':\n email = request.form['email'].strip()\n team = Teams.query.filter_by(email=email).first()\n if not team:\n return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])\n s = TimedSerializer(app.config['SECRET_KEY'])\n token = s.dumps(team.name)\n text = \"\"\"\nDid you initiate a password reset?\n\n{0}/{1}\n\n\"\"\".format(url_for('auth.reset_password', _external=True), 
urllib.quote_plus(token.encode('base64')))\n\n utils.sendmail(email, text)\n\n return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])\n return render_template('reset_password.html')\n\n\[email protected]('/register', methods=['POST', 'GET'])\ndef register():\n if not utils.can_register():\n return redirect(url_for('auth.login'))\n if request.method == 'POST':\n errors = []\n name = request.form['name']\n email = request.form['email']\n password = request.form['password']\n\n name_len = len(name) == 0\n names = Teams.query.add_columns('name', 'id').filter_by(name=name).first()\n emails = Teams.query.add_columns('email', 'id').filter_by(email=email).first()\n pass_short = len(password) == 0\n pass_long = len(password) > 128\n valid_email = re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", request.form['email'])\n\n if not valid_email:\n errors.append(\"That email doesn't look right\")\n if names:\n errors.append('That team name is already taken')\n if emails:\n errors.append('That email has already been used')\n if pass_short:\n errors.append('Pick a longer password')\n if pass_long:\n errors.append('Pick a shorter password')\n if name_len:\n errors.append('Pick a longer team name')\n\n if len(errors) > 0:\n return render_template('register.html', errors=errors, name=request.form['name'], email=request.form['email'], password=request.form['password'])\n else:\n with app.app_context():\n team = Teams(name, email.lower(), password)\n db.session.add(team)\n db.session.commit()\n db.session.flush()\n\n session['username'] = team.name\n session['id'] = team.id\n session['admin'] = team.admin\n session['nonce'] = utils.sha512(os.urandom(10))\n\n if utils.can_send_mail() and utils.get_config('verify_emails'): # Confirming users is enabled and we can send email.\n db.session.close()\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} registered (UNCONFIRMED) with {2}\".format(time.strftime(\"%m/%d/%Y %X\"),\n request.form['name'].encode('utf-8'),\n request.form['email'].encode('utf-8')))\n return redirect(url_for('auth.confirm_user'))\n else: # Don't care about confirming users\n if utils.can_send_mail(): # We want to notify the user that they have registered.\n utils.sendmail(request.form['email'], \"You've successfully registered for {}\".format(utils.get_config('ctf_name')))\n\n db.session.close()\n\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} registered with {2}\".format(time.strftime(\"%m/%d/%Y %X\"), request.form['name'].encode('utf-8'), request.form['email'].encode('utf-8')))\n return redirect(url_for('challenges.challenges_view'))\n else:\n return render_template('register.html')\n\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n errors = []\n name = request.form['name']\n team = Teams.query.filter_by(name=name).first()\n if team:\n if team and bcrypt_sha256.verify(request.form['password'], team.password):\n try:\n session.regenerate() # NO SESSION FIXATION FOR YOU\n except:\n pass # TODO: Some session objects don't implement regenerate :(\n session['username'] = team.name\n session['id'] = team.id\n session['admin'] = team.admin\n session['nonce'] = utils.sha512(os.urandom(10))\n db.session.close()\n\n logger = logging.getLogger('logins')\n logger.warn(\"[{0}] {1} logged in\".format(time.strftime(\"%m/%d/%Y %X\"), session['username'].encode('utf-8')))\n\n if request.args.get('next') and 
utils.is_safe_url(request.args.get('next')):\n return redirect(request.args.get('next'))\n return redirect(url_for('challenges.challenges_view'))\n else: # This user exists but the password is wrong\n errors.append(\"Your username or password is incorrect\")\n db.session.close()\n return render_template('login.html', errors=errors)\n else: # This user just doesn't exist\n errors.append(\"Your username or password is incorrect\")\n db.session.close()\n return render_template('login.html', errors=errors)\n else:\n db.session.close()\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n if utils.authed():\n session.clear()\n return redirect(url_for('views.static_html'))\n", "path": "CTFd/auth.py"}], "after_files": [{"content": "import logging\nimport os\nimport re\nimport time\nimport urllib\n\nfrom flask import current_app as app, render_template, request, redirect, url_for, session, Blueprint\nfrom itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature\nfrom passlib.hash import bcrypt_sha256\n\nfrom CTFd.models import db, Teams\nfrom CTFd import utils\n\nauth = Blueprint('auth', __name__)\n\n\[email protected]('/confirm', methods=['POST', 'GET'])\[email protected]('/confirm/<data>', methods=['GET'])\ndef confirm_user(data=None):\n if not utils.get_config('verify_emails'):\n # If the CTF doesn't care about confirming email addresses then redierct to challenges\n return redirect(url_for('challenges.challenges_view'))\n\n # User is confirming email account\n if data and request.method == \"GET\":\n try:\n s = Signer(app.config['SECRET_KEY'])\n email = s.unsign(urllib.unquote_plus(data.decode('base64')))\n except BadSignature:\n return render_template('confirm.html', errors=['Your confirmation link seems wrong'])\n except:\n return render_template('confirm.html', errors=['Your link appears broken, please try again.'])\n team = Teams.query.filter_by(email=email).first_or_404()\n team.verified = True\n db.session.commit()\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} confirmed {2}\".format(time.strftime(\"%m/%d/%Y %X\"), team.name.encode('utf-8'), team.email.encode('utf-8')))\n db.session.close()\n if utils.authed():\n return redirect(url_for('challenges.challenges_view'))\n return redirect(url_for('auth.login'))\n\n # User is trying to start or restart the confirmation flow\n if not utils.authed():\n return redirect(url_for('auth.login'))\n\n team = Teams.query.filter_by(id=session['id']).first_or_404()\n\n if data is None:\n if request.method == \"POST\":\n # User wants to resend their confirmation email\n if team.verified:\n return redirect(url_for('views.profile'))\n else:\n utils.verify_email(team.email)\n return render_template('confirm.html', team=team, infos=['Your confirmation email has been resent!'])\n elif request.method == \"GET\":\n # User has been directed to the confirm page\n team = Teams.query.filter_by(id=session['id']).first_or_404()\n if team.verified:\n # If user is already verified, redirect to their profile\n return redirect(url_for('views.profile'))\n return render_template('confirm.html', team=team)\n\n\[email protected]('/reset_password', methods=['POST', 'GET'])\[email protected]('/reset_password/<data>', methods=['POST', 'GET'])\ndef reset_password(data=None):\n if data is not None and request.method == \"GET\":\n return render_template('reset_password.html', mode='set')\n if data is not None and request.method == \"POST\":\n try:\n s = TimedSerializer(app.config['SECRET_KEY'])\n name = 
s.loads(urllib.unquote_plus(data.decode('base64')), max_age=1800)\n except BadTimeSignature:\n return render_template('reset_password.html', errors=['Your link has expired'])\n except:\n return render_template('reset_password.html', errors=['Your link appears broken, please try again.'])\n team = Teams.query.filter_by(name=name).first_or_404()\n team.password = bcrypt_sha256.encrypt(request.form['password'].strip())\n db.session.commit()\n db.session.close()\n return redirect(url_for('auth.login'))\n\n if request.method == 'POST':\n email = request.form['email'].strip()\n team = Teams.query.filter_by(email=email).first()\n if not team:\n return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])\n s = TimedSerializer(app.config['SECRET_KEY'])\n token = s.dumps(team.name)\n text = \"\"\"\nDid you initiate a password reset?\n\n{0}/{1}\n\n\"\"\".format(url_for('auth.reset_password', _external=True), urllib.quote_plus(token.encode('base64')))\n\n utils.sendmail(email, text)\n\n return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])\n return render_template('reset_password.html')\n\n\[email protected]('/register', methods=['POST', 'GET'])\ndef register():\n if not utils.can_register():\n return redirect(url_for('auth.login'))\n if request.method == 'POST':\n errors = []\n name = request.form['name']\n email = request.form['email']\n password = request.form['password']\n\n name_len = len(name) == 0\n names = Teams.query.add_columns('name', 'id').filter_by(name=name).first()\n emails = Teams.query.add_columns('email', 'id').filter_by(email=email).first()\n pass_short = len(password) == 0\n pass_long = len(password) > 128\n valid_email = re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", request.form['email'])\n\n if not valid_email:\n errors.append(\"That email doesn't look right\")\n if names:\n errors.append('That team name is already taken')\n if emails:\n errors.append('That email has already been used')\n if pass_short:\n errors.append('Pick a longer password')\n if pass_long:\n errors.append('Pick a shorter password')\n if name_len:\n errors.append('Pick a longer team name')\n\n if len(errors) > 0:\n return render_template('register.html', errors=errors, name=request.form['name'], email=request.form['email'], password=request.form['password'])\n else:\n with app.app_context():\n team = Teams(name, email.lower(), password)\n db.session.add(team)\n db.session.commit()\n db.session.flush()\n\n session['username'] = team.name\n session['id'] = team.id\n session['admin'] = team.admin\n session['nonce'] = utils.sha512(os.urandom(10))\n\n if utils.can_send_mail() and utils.get_config('verify_emails'): # Confirming users is enabled and we can send email.\n db.session.close()\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} registered (UNCONFIRMED) with {2}\".format(time.strftime(\"%m/%d/%Y %X\"),\n request.form['name'].encode('utf-8'),\n request.form['email'].encode('utf-8')))\n\n utils.verify_email(team.email)\n\n return redirect(url_for('auth.confirm_user'))\n else: # Don't care about confirming users\n if utils.can_send_mail(): # We want to notify the user that they have registered.\n utils.sendmail(request.form['email'], \"You've successfully registered for {}\".format(utils.get_config('ctf_name')))\n\n db.session.close()\n\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} registered with 
{2}\".format(time.strftime(\"%m/%d/%Y %X\"), request.form['name'].encode('utf-8'), request.form['email'].encode('utf-8')))\n return redirect(url_for('challenges.challenges_view'))\n else:\n return render_template('register.html')\n\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n errors = []\n name = request.form['name']\n team = Teams.query.filter_by(name=name).first()\n if team:\n if team and bcrypt_sha256.verify(request.form['password'], team.password):\n try:\n session.regenerate() # NO SESSION FIXATION FOR YOU\n except:\n pass # TODO: Some session objects don't implement regenerate :(\n session['username'] = team.name\n session['id'] = team.id\n session['admin'] = team.admin\n session['nonce'] = utils.sha512(os.urandom(10))\n db.session.close()\n\n logger = logging.getLogger('logins')\n logger.warn(\"[{0}] {1} logged in\".format(time.strftime(\"%m/%d/%Y %X\"), session['username'].encode('utf-8')))\n\n if request.args.get('next') and utils.is_safe_url(request.args.get('next')):\n return redirect(request.args.get('next'))\n return redirect(url_for('challenges.challenges_view'))\n else: # This user exists but the password is wrong\n errors.append(\"Your username or password is incorrect\")\n db.session.close()\n return render_template('login.html', errors=errors)\n else: # This user just doesn't exist\n errors.append(\"Your username or password is incorrect\")\n db.session.close()\n return render_template('login.html', errors=errors)\n else:\n db.session.close()\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n if utils.authed():\n session.clear()\n return redirect(url_for('views.static_html'))\n", "path": "CTFd/auth.py"}]} | 2,729 | 687 |
gh_patches_debug_41219 | rasdani/github-patches | git_diff | beeware__toga-863 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Progressbar crash on start
## Expected Behavior
<!--- If you're describing a bug, tell us what you expect to happen. -->
Progress bar to load
<!--- If you're requesting a new feature, tell us why you'd like this feature. -->
## Current Behavior
<!--- If you're describing a bug, what currently happens? -->
```
python -m progressbar
Unhandled Exception: Python.Runtime.PythonException: TypeError : 'NoneType' value cannot be converted to System.Int32
at Python.Runtime.Dispatcher.Dispatch(ArrayList args)
at __System_Threading_ThreadStartDispatcher.Invoke()
at System.Threading.ThreadHelper.ThreadStart_Context(Object state)
at System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)
at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)
at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state)
at System.Threading.ThreadHelper.ThreadStart()
```
## Steps to reproduce
<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->
1. launch progressbar
## Your Environment
<!--- Provide details on your current environment you found the bug in -->
* Python Version (list the specific version number) 3.6.5
* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)
- [ ] macOS - version:
- [ ] Linux - distro: - version:
- [ * ] Windows - version: Win10 1607
- [ ] Other - name: - version:
* Toga Target (the type of app you are trying to generate)
- [ ] android
- [ ] cocoa
- [ ] django
- [ ] gtk
- [ ] iOS
- [ ] tvOS
- [ ] watchOS
- [ * ] winforms
- [ ] win32
- [ ] Other (please specify)
--- END ISSUE ---
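For context on the shape of a fix: the traceback comes from handing Python's `None` to a property that expects a .NET `Int32`. A minimal, hypothetical guard (a sketch that reuses the `WinForms.ProgressBarStyle` names, not the actual toga-winforms patch) would branch on the missing maximum:

```python
def apply_max(native_bar, value, WinForms):
    """Hypothetical helper (illustration only): map toga's ``max=None`` onto a
    WinForms style instead of assigning None to the Maximum property."""
    if value is None:
        # Indeterminate bar: animate it rather than converting None to Int32.
        native_bar.Style = WinForms.ProgressBarStyle.Marquee
    else:
        native_bar.Maximum = value
        native_bar.Style = WinForms.ProgressBarStyle.Blocks
```

Assigning `None` to `Maximum` is exactly the `'NoneType' value cannot be converted to System.Int32` conversion reported in the traceback above.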
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/winforms/toga_winforms/widgets/progressbar.py`
Content:
```
1 from travertino.size import at_least
2
3 from toga_winforms.libs import WinForms
4
5 from .base import Widget
6
7
8 class ProgressBar(Widget):
9 def create(self):
10 self.native = WinForms.ProgressBar()
11
12 def start(self):
13 '''Not supported for WinForms implementation'''
14 self.interface.factory.not_implemented('ProgressBar.start()')
15
16 # possible implementation (not tested):
17 # self.native.Style = ProgressBarStyle.Marquee
18
19 def stop(self):
20 '''Not supported for WinForms implementation'''
21 self.interface.factory.not_implemented('ProgressBar.stop()')
22
23 # possible implementation (not tested):
24 # self.native.Style = ProgressBarStyle.Continuous
25
26 def set_max(self, value):
27 self.native.Maximum = value
28
29 def set_value(self, value):
30 self.native.Value = value
31
32 def rehint(self):
33 # Height must be non-zero
34 # Set a sensible min-width
35 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
36 self.interface.intrinsic.height = self.native.PreferredSize.Height
37
```
Path: `examples/progressbar/progressbar/app.py`
Content:
```
1 import toga
2 from toga.style import Pack
3 from toga.constants import ROW, COLUMN
4
5
6 class ProgressBarApp(toga.App):
7
8 def startup(self):
9 # Main window of the application with title and size
10 self.main_window = toga.MainWindow(title=self.name, size=(500, 500))
11
12 # the user may change the value with +/- buttons
13 self.progress_adder = toga.ProgressBar()
14
15 # the user may switch between "running" mode and a set value
16 self.progress_runner = toga.ProgressBar(max=None)
17
18 # set up common styles
19 label_style = Pack(flex=1, padding_right=24)
20 row_box_style = Pack(direction=ROW, padding=24)
21 col_box_style = Pack(direction=COLUMN, padding=24)
22
23 # Add the content on the main window
24 self.main_window.content = toga.Box(style=col_box_style, children=[
25 toga.Box(style=col_box_style, children=[
26 toga.Label("Use the +/- buttons to change the progress",
27 style=label_style),
28
29 self.progress_adder,
30
31 toga.Box(children=[
32 toga.Button("+", on_press=self.increase_progress,
33 style=Pack(flex=1)),
34 toga.Button("-", on_press=self.decrease_progress,
35 style=Pack(flex=1)),
36 ]),
37
38 toga.Switch("Toggle running mode", on_toggle=self.toggle_running)
39 ]),
40
41 toga.Box(style=row_box_style, children=[
42 toga.Label("default ProgressBar", style=label_style),
43 toga.ProgressBar(),
44 ]),
45
46 toga.Box(style=row_box_style, children=[
47 toga.Label("disabled ProgressBar", style=label_style),
48 toga.ProgressBar(max=None, running=False),
49 ]),
50
51 toga.Box(style=row_box_style, children=[
52 toga.Label("indeterminate ProgressBar", style=label_style),
53 toga.ProgressBar(max=None, running=True),
54 ]),
55
56 toga.Box(style=row_box_style, children=[
57 toga.Label("determinate ProgressBar", style=label_style),
58 toga.ProgressBar(max=1, running=False, value=0.5),
59 ]),
60
61 toga.Box(style=row_box_style, children=[
62 toga.Label("running determinate ProgressBar", style=label_style),
63 toga.ProgressBar(max=1, running=True, value=0.5),
64 ]),
65 ])
66
67 self.main_window.show()
68
69 def increase_progress(self, button, **kw):
70 if not self.progress_adder.is_running:
71 self.progress_adder.value += 0.1 * self.progress_adder.max
72
73 def decrease_progress(self, button, **kw):
74 if not self.progress_adder.is_running:
75 self.progress_adder.value -= 0.1 * self.progress_adder.max
76
77 def toggle_running(self, switch, **kw):
78 if switch.is_on:
79 self.progress_adder.max = None
80 self.progress_adder.start()
81 else:
82 self.progress_adder.max = 1
83 self.progress_adder.stop()
84
85
86 def main():
87 # App name and namespace
88 return ProgressBarApp('ProgressBar', 'org.beeware.examples.progressbar')
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/progressbar/progressbar/app.py b/examples/progressbar/progressbar/app.py
--- a/examples/progressbar/progressbar/app.py
+++ b/examples/progressbar/progressbar/app.py
@@ -2,6 +2,8 @@
from toga.style import Pack
from toga.constants import ROW, COLUMN
+MAX_PROGRESSBAR_VALUE = 100
+
class ProgressBarApp(toga.App):
@@ -10,10 +12,7 @@
self.main_window = toga.MainWindow(title=self.name, size=(500, 500))
# the user may change the value with +/- buttons
- self.progress_adder = toga.ProgressBar()
-
- # the user may switch between "running" mode and a set value
- self.progress_runner = toga.ProgressBar(max=None)
+ self.progress_adder = toga.ProgressBar(max=MAX_PROGRESSBAR_VALUE)
# set up common styles
label_style = Pack(flex=1, padding_right=24)
@@ -55,12 +54,18 @@
toga.Box(style=row_box_style, children=[
toga.Label("determinate ProgressBar", style=label_style),
- toga.ProgressBar(max=1, running=False, value=0.5),
+ toga.ProgressBar(
+ max=MAX_PROGRESSBAR_VALUE,
+ running=False,
+ value=0.5 * MAX_PROGRESSBAR_VALUE),
]),
toga.Box(style=row_box_style, children=[
toga.Label("running determinate ProgressBar", style=label_style),
- toga.ProgressBar(max=1, running=True, value=0.5),
+ toga.ProgressBar(
+ max=MAX_PROGRESSBAR_VALUE,
+ running=True,
+ value=0.5 * MAX_PROGRESSBAR_VALUE),
]),
])
@@ -79,7 +84,7 @@
self.progress_adder.max = None
self.progress_adder.start()
else:
- self.progress_adder.max = 1
+ self.progress_adder.max = MAX_PROGRESSBAR_VALUE
self.progress_adder.stop()
diff --git a/src/winforms/toga_winforms/widgets/progressbar.py b/src/winforms/toga_winforms/widgets/progressbar.py
--- a/src/winforms/toga_winforms/widgets/progressbar.py
+++ b/src/winforms/toga_winforms/widgets/progressbar.py
@@ -10,21 +10,31 @@
self.native = WinForms.ProgressBar()
def start(self):
- '''Not supported for WinForms implementation'''
- self.interface.factory.not_implemented('ProgressBar.start()')
-
- # possible implementation (not tested):
- # self.native.Style = ProgressBarStyle.Marquee
+ self.set_running_style()
def stop(self):
- '''Not supported for WinForms implementation'''
- self.interface.factory.not_implemented('ProgressBar.stop()')
+ self.set_stopping_style()
- # possible implementation (not tested):
- # self.native.Style = ProgressBarStyle.Continuous
+ @property
+ def max(self):
+ return self.interface.max
def set_max(self, value):
- self.native.Maximum = value
+ if value is not None:
+ self.native.Maximum = value
+ if self.interface.is_running:
+ self.set_running_style()
+ else:
+ self.set_stopping_style()
+
+ def set_running_style(self):
+ if self.max is None:
+ self.native.Style = WinForms.ProgressBarStyle.Marquee
+ else:
+ self.native.Style = WinForms.ProgressBarStyle.Blocks
+
+ def set_stopping_style(self):
+ self.native.Style = WinForms.ProgressBarStyle.Continuous
def set_value(self, value):
self.native.Value = value
| {"golden_diff": "diff --git a/examples/progressbar/progressbar/app.py b/examples/progressbar/progressbar/app.py\n--- a/examples/progressbar/progressbar/app.py\n+++ b/examples/progressbar/progressbar/app.py\n@@ -2,6 +2,8 @@\n from toga.style import Pack\n from toga.constants import ROW, COLUMN\n \n+MAX_PROGRESSBAR_VALUE = 100\n+\n \n class ProgressBarApp(toga.App):\n \n@@ -10,10 +12,7 @@\n self.main_window = toga.MainWindow(title=self.name, size=(500, 500))\n \n # the user may change the value with +/- buttons\n- self.progress_adder = toga.ProgressBar()\n-\n- # the user may switch between \"running\" mode and a set value\n- self.progress_runner = toga.ProgressBar(max=None)\n+ self.progress_adder = toga.ProgressBar(max=MAX_PROGRESSBAR_VALUE)\n \n # set up common styles\n label_style = Pack(flex=1, padding_right=24)\n@@ -55,12 +54,18 @@\n \n toga.Box(style=row_box_style, children=[\n toga.Label(\"determinate ProgressBar\", style=label_style),\n- toga.ProgressBar(max=1, running=False, value=0.5),\n+ toga.ProgressBar(\n+ max=MAX_PROGRESSBAR_VALUE,\n+ running=False,\n+ value=0.5 * MAX_PROGRESSBAR_VALUE),\n ]),\n \n toga.Box(style=row_box_style, children=[\n toga.Label(\"running determinate ProgressBar\", style=label_style),\n- toga.ProgressBar(max=1, running=True, value=0.5),\n+ toga.ProgressBar(\n+ max=MAX_PROGRESSBAR_VALUE,\n+ running=True,\n+ value=0.5 * MAX_PROGRESSBAR_VALUE),\n ]),\n ])\n \n@@ -79,7 +84,7 @@\n self.progress_adder.max = None\n self.progress_adder.start()\n else:\n- self.progress_adder.max = 1\n+ self.progress_adder.max = MAX_PROGRESSBAR_VALUE\n self.progress_adder.stop()\n \n \ndiff --git a/src/winforms/toga_winforms/widgets/progressbar.py b/src/winforms/toga_winforms/widgets/progressbar.py\n--- a/src/winforms/toga_winforms/widgets/progressbar.py\n+++ b/src/winforms/toga_winforms/widgets/progressbar.py\n@@ -10,21 +10,31 @@\n self.native = WinForms.ProgressBar()\n \n def start(self):\n- '''Not supported for WinForms implementation'''\n- self.interface.factory.not_implemented('ProgressBar.start()')\n-\n- # possible implementation (not tested):\n- # self.native.Style = ProgressBarStyle.Marquee\n+ self.set_running_style()\n \n def stop(self):\n- '''Not supported for WinForms implementation'''\n- self.interface.factory.not_implemented('ProgressBar.stop()')\n+ self.set_stopping_style()\n \n- # possible implementation (not tested):\n- # self.native.Style = ProgressBarStyle.Continuous\n+ @property\n+ def max(self):\n+ return self.interface.max\n \n def set_max(self, value):\n- self.native.Maximum = value\n+ if value is not None:\n+ self.native.Maximum = value\n+ if self.interface.is_running:\n+ self.set_running_style()\n+ else:\n+ self.set_stopping_style()\n+\n+ def set_running_style(self):\n+ if self.max is None:\n+ self.native.Style = WinForms.ProgressBarStyle.Marquee\n+ else:\n+ self.native.Style = WinForms.ProgressBarStyle.Blocks\n+\n+ def set_stopping_style(self):\n+ self.native.Style = WinForms.ProgressBarStyle.Continuous\n \n def set_value(self, value):\n self.native.Value = value\n", "issue": "Progressbar crash on start\n## Expected Behavior\r\n<!--- If you're describing a bug, tell us what you expect to happen. -->\r\nProgress bar to load\r\n<!--- If you're requesting a new feature, tell us why you'd like this feature. -->\r\n\r\n\r\n## Current Behavior\r\n<!--- If you're describing a bug, what currently happens? 
-->\r\n```\r\npython -m progressbar\r\nUnhandled Exception: Python.Runtime.PythonException: TypeError : 'NoneType' value cannot be converted to System.Int32\r\n at Python.Runtime.Dispatcher.Dispatch(ArrayList args)\r\n at __System_Threading_ThreadStartDispatcher.Invoke()\r\n at System.Threading.ThreadHelper.ThreadStart_Context(Object state)\r\n at System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)\r\n at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)\r\n at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state)\r\n at System.Threading.ThreadHelper.ThreadStart()\r\n```\r\n## Steps to reproduce\r\n<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->\r\n1. launch progressbar\r\n\r\n## Your Environment\r\n<!--- Provide details on your current environment you found the bug in -->\r\n\r\n* Python Version (list the specific version number) 3.6.5\r\n\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n - [ ] macOS - version: \r\n - [ ] Linux - distro: - version:\r\n - [ * ] Windows - version: Win10 1607\r\n - [ ] Other - name: - version:\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n \r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django \r\n - [ ] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [ * ] winforms \r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass ProgressBar(Widget):\n def create(self):\n self.native = WinForms.ProgressBar()\n\n def start(self):\n '''Not supported for WinForms implementation'''\n self.interface.factory.not_implemented('ProgressBar.start()')\n\n # possible implementation (not tested):\n # self.native.Style = ProgressBarStyle.Marquee\n\n def stop(self):\n '''Not supported for WinForms implementation'''\n self.interface.factory.not_implemented('ProgressBar.stop()')\n\n # possible implementation (not tested):\n # self.native.Style = ProgressBarStyle.Continuous\n\n def set_max(self, value):\n self.native.Maximum = value\n\n def set_value(self, value):\n self.native.Value = value\n\n def rehint(self):\n # Height must be non-zero\n # Set a sensible min-width\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = self.native.PreferredSize.Height\n", "path": "src/winforms/toga_winforms/widgets/progressbar.py"}, {"content": "import toga\nfrom toga.style import Pack\nfrom toga.constants import ROW, COLUMN\n\n\nclass ProgressBarApp(toga.App):\n\n def startup(self):\n # Main window of the application with title and size\n self.main_window = toga.MainWindow(title=self.name, size=(500, 500))\n\n # the user may change the value with +/- buttons\n self.progress_adder = toga.ProgressBar()\n\n # the user may switch between \"running\" mode and a set value\n self.progress_runner = toga.ProgressBar(max=None)\n\n # set up common styles\n label_style = Pack(flex=1, padding_right=24)\n row_box_style = Pack(direction=ROW, padding=24)\n col_box_style = Pack(direction=COLUMN, padding=24)\n\n # Add the content on the main window\n self.main_window.content = 
toga.Box(style=col_box_style, children=[\n toga.Box(style=col_box_style, children=[\n toga.Label(\"Use the +/- buttons to change the progress\",\n style=label_style),\n\n self.progress_adder,\n\n toga.Box(children=[\n toga.Button(\"+\", on_press=self.increase_progress,\n style=Pack(flex=1)),\n toga.Button(\"-\", on_press=self.decrease_progress,\n style=Pack(flex=1)),\n ]),\n\n toga.Switch(\"Toggle running mode\", on_toggle=self.toggle_running)\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"default ProgressBar\", style=label_style),\n toga.ProgressBar(),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"disabled ProgressBar\", style=label_style),\n toga.ProgressBar(max=None, running=False),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"indeterminate ProgressBar\", style=label_style),\n toga.ProgressBar(max=None, running=True),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"determinate ProgressBar\", style=label_style),\n toga.ProgressBar(max=1, running=False, value=0.5),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"running determinate ProgressBar\", style=label_style),\n toga.ProgressBar(max=1, running=True, value=0.5),\n ]),\n ])\n\n self.main_window.show()\n\n def increase_progress(self, button, **kw):\n if not self.progress_adder.is_running:\n self.progress_adder.value += 0.1 * self.progress_adder.max\n\n def decrease_progress(self, button, **kw):\n if not self.progress_adder.is_running:\n self.progress_adder.value -= 0.1 * self.progress_adder.max\n\n def toggle_running(self, switch, **kw):\n if switch.is_on:\n self.progress_adder.max = None\n self.progress_adder.start()\n else:\n self.progress_adder.max = 1\n self.progress_adder.stop()\n\n\ndef main():\n # App name and namespace\n return ProgressBarApp('ProgressBar', 'org.beeware.examples.progressbar')\n", "path": "examples/progressbar/progressbar/app.py"}], "after_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass ProgressBar(Widget):\n def create(self):\n self.native = WinForms.ProgressBar()\n\n def start(self):\n self.set_running_style()\n\n def stop(self):\n self.set_stopping_style()\n\n @property\n def max(self):\n return self.interface.max\n\n def set_max(self, value):\n if value is not None:\n self.native.Maximum = value\n if self.interface.is_running:\n self.set_running_style()\n else:\n self.set_stopping_style()\n\n def set_running_style(self):\n if self.max is None:\n self.native.Style = WinForms.ProgressBarStyle.Marquee\n else:\n self.native.Style = WinForms.ProgressBarStyle.Blocks\n\n def set_stopping_style(self):\n self.native.Style = WinForms.ProgressBarStyle.Continuous\n\n def set_value(self, value):\n self.native.Value = value\n\n def rehint(self):\n # Height must be non-zero\n # Set a sensible min-width\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = self.native.PreferredSize.Height\n", "path": "src/winforms/toga_winforms/widgets/progressbar.py"}, {"content": "import toga\nfrom toga.style import Pack\nfrom toga.constants import ROW, COLUMN\n\nMAX_PROGRESSBAR_VALUE = 100\n\n\nclass ProgressBarApp(toga.App):\n\n def startup(self):\n # Main window of the application with title and size\n self.main_window = toga.MainWindow(title=self.name, size=(500, 500))\n\n # the user may change the value with +/- buttons\n self.progress_adder = toga.ProgressBar(max=MAX_PROGRESSBAR_VALUE)\n\n # set 
up common styles\n label_style = Pack(flex=1, padding_right=24)\n row_box_style = Pack(direction=ROW, padding=24)\n col_box_style = Pack(direction=COLUMN, padding=24)\n\n # Add the content on the main window\n self.main_window.content = toga.Box(style=col_box_style, children=[\n toga.Box(style=col_box_style, children=[\n toga.Label(\"Use the +/- buttons to change the progress\",\n style=label_style),\n\n self.progress_adder,\n\n toga.Box(children=[\n toga.Button(\"+\", on_press=self.increase_progress,\n style=Pack(flex=1)),\n toga.Button(\"-\", on_press=self.decrease_progress,\n style=Pack(flex=1)),\n ]),\n\n toga.Switch(\"Toggle running mode\", on_toggle=self.toggle_running)\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"default ProgressBar\", style=label_style),\n toga.ProgressBar(),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"disabled ProgressBar\", style=label_style),\n toga.ProgressBar(max=None, running=False),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"indeterminate ProgressBar\", style=label_style),\n toga.ProgressBar(max=None, running=True),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"determinate ProgressBar\", style=label_style),\n toga.ProgressBar(\n max=MAX_PROGRESSBAR_VALUE,\n running=False,\n value=0.5 * MAX_PROGRESSBAR_VALUE),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"running determinate ProgressBar\", style=label_style),\n toga.ProgressBar(\n max=MAX_PROGRESSBAR_VALUE,\n running=True,\n value=0.5 * MAX_PROGRESSBAR_VALUE),\n ]),\n ])\n\n self.main_window.show()\n\n def increase_progress(self, button, **kw):\n if not self.progress_adder.is_running:\n self.progress_adder.value += 0.1 * self.progress_adder.max\n\n def decrease_progress(self, button, **kw):\n if not self.progress_adder.is_running:\n self.progress_adder.value -= 0.1 * self.progress_adder.max\n\n def toggle_running(self, switch, **kw):\n if switch.is_on:\n self.progress_adder.max = None\n self.progress_adder.start()\n else:\n self.progress_adder.max = MAX_PROGRESSBAR_VALUE\n self.progress_adder.stop()\n\n\ndef main():\n # App name and namespace\n return ProgressBarApp('ProgressBar', 'org.beeware.examples.progressbar')\n", "path": "examples/progressbar/progressbar/app.py"}]} | 1,876 | 814 |
gh_patches_debug_25048 | rasdani/github-patches | git_diff | mosaicml__composer-1493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HuggingFace model should update word embeddings dimension according to tokenizer
**To reproduce**
Steps to reproduce the behavior:
Right now if I call
```
model = transformers.AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m')
tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')
model = HuggingFaceModel(model=model, tokenizer=tokenizer)
```
The bloom model will have a [250880, 1024] dim word embedding matrix and a vocab size of 250880 even though a gpt2 tokenizer with vocab size of 50257 is used.
## Expected behavior
The class HuggingFaceModel needs to update the word embedding matrix if a tokenizer is supplied.
--- END ISSUE ---
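For reference, 🤗 Transformers exposes `resize_token_embeddings()` on `PreTrainedModel`, which resizes the input embedding matrix (and the tied output head, where applicable) to a new vocabulary size. A minimal sketch of the expected check, using the raw `transformers` model and tokenizer from the snippet above before wrapping them:

```python
# Sketch: align the model's vocabulary with the supplied tokenizer.
if model.config.vocab_size != len(tokenizer):
    model.resize_token_embeddings(len(tokenizer))  # here: 250880 -> 50257
```

A wrapper class that receives both objects could run the same check in its constructor.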
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `composer/models/huggingface.py`
Content:
```
1 # Copyright 2022 MosaicML Composer authors
2 # SPDX-License-Identifier: Apache-2.0
3
4 """A wrapper class that converts 🤗 Transformers models to composer models"""
5
6 from __future__ import annotations
7
8 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
9
10 from torchmetrics import Metric
11
12 from composer.models.base import ComposerModel
13 from composer.utils.import_helpers import MissingConditionalImportError
14
15 if TYPE_CHECKING:
16 import transformers
17
18 __all__ = ['HuggingFaceModel']
19
20
21 class HuggingFaceModel(ComposerModel):
22 """
23 A wrapper class that converts 🤗 Transformers models to composer models.
24
25 Args:
26 model (transformers.PreTrainedModel): A 🤗 Transformers model.
27 tokenizer (transformers.PreTrainedTokenizer): Tokenizer used to prepare the dataset and validate model inputs during training. Default ``None``.
28 use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False``
29 metrics (list[Metric], optional): list of torchmetrics to apply to the output of `validate`. Default: ``None``.
30 .. warning:: This wrapper is designed to work with 🤗 datasets that define a `labels` column.
31
32 Example:
33
34 .. testcode::
35
36 import transformers
37 from composer.models import HuggingFaceModel
38
39 hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
40 model = HuggingFaceModel(hf_model)
41 """
42
43 def __init__(self,
44 model: transformers.PreTrainedModel,
45 tokenizer: Optional[Union[transformers.PreTrainedTokenizer,
46 transformers.PreTrainedTokenizerFast]] = None,
47 use_logits: Optional[bool] = False,
48 metrics: Optional[List[Metric]] = None) -> None:
49 try:
50 import transformers
51 except ImportError as e:
52 raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
53
54 super().__init__()
55 self.model = model
56 self.config = model.config
57
58 # the set of inputs that a model expects inferred from the model type or
59 # tokenizer if provided
60 if tokenizer is None:
61 if isinstance(self.model.base_model, transformers.GPT2Model):
62 self.model_inputs = {'input_ids', 'attention_mask'}
63 elif isinstance(self.model.base_model, transformers.BertModel):
64 self.model_inputs = {'input_ids', 'attention_mask', 'token_type_ids'}
65 else:
66 assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'
67 self.model_inputs = set(tokenizer.model_input_names)
68
69 self.use_logits = use_logits
70
71 self.train_metrics = None
72 self.val_metrics = None
73
74 if metrics:
75 self.train_metrics = {metric.__class__.__name__: metric for metric in metrics}
76 self.val_metrics = {metric.__class__.__name__: metric for metric in metrics}
77
78 self.labels = None # set in eval_forward() if exists
79
80 def forward(self, batch):
81 for key in self.model_inputs:
82 if key not in batch.keys():
83 raise ValueError(f'Batch missing key: {key}')
84
85 output = self.model(**batch) # type: ignore (thirdparty)
86 return output
87
88 def loss(self, outputs, batch):
89 return outputs['loss']
90
91 def eval_forward(self, batch, outputs: Optional[Any] = None):
92 output = outputs if outputs else self.forward(batch)
93 if self.use_logits:
94 self.labels = batch.pop('labels')
95 output = output['logits']
96
97 # if we are in the single class case, then remove the classes dimension
98 if output.shape[1] == 1:
99 output = output.squeeze(dim=1)
100
101 return output
102
103 def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:
104 if is_train:
105 metrics = self.train_metrics
106 else:
107 metrics = self.val_metrics
108
109 return metrics if metrics else {}
110
111 def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:
112 metric.update(outputs, self.labels)
113
114 def get_model_inputs(self):
115 """Returns a set of inputs that the model expects in the forward pass.
116 If an algorithm wants to interact with the model inputs (for instance,
117 popping the labels for a custom loss fn, or adding attention head masks
118 for head pruning, it must access self.set_model_inputs().
119 Returns:
120 model_inputs: The set of keys that are expected in the Mapping used to compute the forward pass.
121 """
122
123 return self.model_inputs
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/composer/models/huggingface.py b/composer/models/huggingface.py
--- a/composer/models/huggingface.py
+++ b/composer/models/huggingface.py
@@ -5,6 +5,7 @@
from __future__ import annotations
+import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from torchmetrics import Metric
@@ -15,6 +16,8 @@
if TYPE_CHECKING:
import transformers
+log = logging.getLogger(__name__)
+
__all__ = ['HuggingFaceModel']
@@ -66,6 +69,13 @@
assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'
self.model_inputs = set(tokenizer.model_input_names)
+ if self.config.vocab_size != len(tokenizer):
+ # set model's word embedding matrix and final lm_head to vocab size according to tokenizer
+ log.warning(
+ f'The number of tokens in the tokenizer and the number of tokens in the model are different.'
+ f' Resizing the model tokenizer to {len(tokenizer)} from {self.config.vocab_size}.')
+ self.model.resize_token_embeddings(len(tokenizer))
+
self.use_logits = use_logits
self.train_metrics = None
| {"golden_diff": "diff --git a/composer/models/huggingface.py b/composer/models/huggingface.py\n--- a/composer/models/huggingface.py\n+++ b/composer/models/huggingface.py\n@@ -5,6 +5,7 @@\n \n from __future__ import annotations\n \n+import logging\n from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n \n from torchmetrics import Metric\n@@ -15,6 +16,8 @@\n if TYPE_CHECKING:\n import transformers\n \n+log = logging.getLogger(__name__)\n+\n __all__ = ['HuggingFaceModel']\n \n \n@@ -66,6 +69,13 @@\n assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'\n self.model_inputs = set(tokenizer.model_input_names)\n \n+ if self.config.vocab_size != len(tokenizer):\n+ # set model's word embedding matrix and final lm_head to vocab size according to tokenizer\n+ log.warning(\n+ f'The number of tokens in the tokenizer and the number of tokens in the model are different.'\n+ f' Resizing the model tokenizer to {len(tokenizer)} from {self.config.vocab_size}.')\n+ self.model.resize_token_embeddings(len(tokenizer))\n+\n self.use_logits = use_logits\n \n self.train_metrics = None\n", "issue": "HuggingFace model should update word embeddings dimension according to tokenizer\n** To reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nRight now if I call \r\n\r\n```\r\nmodel = transformers.AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m')\r\ntokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')\r\n\r\nmodel = HuggingFaceModel(model=model, tokenizer=tokenizer)\r\n```\r\n\r\nThe bloom model will have a [250880, 1024] dim word embedding matrix and a vocab size of 250880 even though a gpt2 tokenizer with vocab size of 50257 is used.\r\n\r\n## Expected behavior\r\n\r\nThe class HuggingFaceModel needs to update word embedding matrix if a tokenizer is supplied.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"A wrapper class that converts \ud83e\udd17 Transformers models to composer models\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n\nfrom torchmetrics import Metric\n\nfrom composer.models.base import ComposerModel\nfrom composer.utils.import_helpers import MissingConditionalImportError\n\nif TYPE_CHECKING:\n import transformers\n\n__all__ = ['HuggingFaceModel']\n\n\nclass HuggingFaceModel(ComposerModel):\n \"\"\"\n A wrapper class that converts \ud83e\udd17 Transformers models to composer models.\n\n Args:\n model (transformers.PreTrainedModel): A \ud83e\udd17 Transformers model.\n tokenizer (transformers.PreTrainedTokenizer): Tokenizer used to prepare the dataset and validate model inputs during training. Default ``None``.\n use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False``\n metrics (list[Metric], optional): list of torchmetrics to apply to the output of `validate`. Default: ``None``.\n .. warning:: This wrapper is designed to work with \ud83e\udd17 datasets that define a `labels` column.\n\n Example:\n\n .. 
testcode::\n\n import transformers\n from composer.models import HuggingFaceModel\n\n hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n model = HuggingFaceModel(hf_model)\n \"\"\"\n\n def __init__(self,\n model: transformers.PreTrainedModel,\n tokenizer: Optional[Union[transformers.PreTrainedTokenizer,\n transformers.PreTrainedTokenizerFast]] = None,\n use_logits: Optional[bool] = False,\n metrics: Optional[List[Metric]] = None) -> None:\n try:\n import transformers\n except ImportError as e:\n raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e\n\n super().__init__()\n self.model = model\n self.config = model.config\n\n # the set of inputs that a model expects inferred from the model type or\n # tokenizer if provided\n if tokenizer is None:\n if isinstance(self.model.base_model, transformers.GPT2Model):\n self.model_inputs = {'input_ids', 'attention_mask'}\n elif isinstance(self.model.base_model, transformers.BertModel):\n self.model_inputs = {'input_ids', 'attention_mask', 'token_type_ids'}\n else:\n assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'\n self.model_inputs = set(tokenizer.model_input_names)\n\n self.use_logits = use_logits\n\n self.train_metrics = None\n self.val_metrics = None\n\n if metrics:\n self.train_metrics = {metric.__class__.__name__: metric for metric in metrics}\n self.val_metrics = {metric.__class__.__name__: metric for metric in metrics}\n\n self.labels = None # set in eval_forward() if exists\n\n def forward(self, batch):\n for key in self.model_inputs:\n if key not in batch.keys():\n raise ValueError(f'Batch missing key: {key}')\n\n output = self.model(**batch) # type: ignore (thirdparty)\n return output\n\n def loss(self, outputs, batch):\n return outputs['loss']\n\n def eval_forward(self, batch, outputs: Optional[Any] = None):\n output = outputs if outputs else self.forward(batch)\n if self.use_logits:\n self.labels = batch.pop('labels')\n output = output['logits']\n\n # if we are in the single class case, then remove the classes dimension\n if output.shape[1] == 1:\n output = output.squeeze(dim=1)\n\n return output\n\n def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:\n if is_train:\n metrics = self.train_metrics\n else:\n metrics = self.val_metrics\n\n return metrics if metrics else {}\n\n def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:\n metric.update(outputs, self.labels)\n\n def get_model_inputs(self):\n \"\"\"Returns a set of inputs that the model expects in the forward pass.\n If an algorithm wants to interact with the model inputs (for instance,\n popping the labels for a custom loss fn, or adding attention head masks\n for head pruning, it must access self.set_model_inputs().\n Returns:\n model_inputs: The set of keys that are expected in the Mapping used to compute the forward pass.\n \"\"\"\n\n return self.model_inputs\n", "path": "composer/models/huggingface.py"}], "after_files": [{"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"A wrapper class that converts \ud83e\udd17 Transformers models to composer models\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n\nfrom torchmetrics import Metric\n\nfrom composer.models.base import ComposerModel\nfrom composer.utils.import_helpers import MissingConditionalImportError\n\nif 
TYPE_CHECKING:\n import transformers\n\nlog = logging.getLogger(__name__)\n\n__all__ = ['HuggingFaceModel']\n\n\nclass HuggingFaceModel(ComposerModel):\n \"\"\"\n A wrapper class that converts \ud83e\udd17 Transformers models to composer models.\n\n Args:\n model (transformers.PreTrainedModel): A \ud83e\udd17 Transformers model.\n tokenizer (transformers.PreTrainedTokenizer): Tokenizer used to prepare the dataset and validate model inputs during training. Default ``None``.\n use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False``\n metrics (list[Metric], optional): list of torchmetrics to apply to the output of `validate`. Default: ``None``.\n .. warning:: This wrapper is designed to work with \ud83e\udd17 datasets that define a `labels` column.\n\n Example:\n\n .. testcode::\n\n import transformers\n from composer.models import HuggingFaceModel\n\n hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n model = HuggingFaceModel(hf_model)\n \"\"\"\n\n def __init__(self,\n model: transformers.PreTrainedModel,\n tokenizer: Optional[Union[transformers.PreTrainedTokenizer,\n transformers.PreTrainedTokenizerFast]] = None,\n use_logits: Optional[bool] = False,\n metrics: Optional[List[Metric]] = None) -> None:\n try:\n import transformers\n except ImportError as e:\n raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e\n\n super().__init__()\n self.model = model\n self.config = model.config\n\n # the set of inputs that a model expects inferred from the model type or\n # tokenizer if provided\n if tokenizer is None:\n if isinstance(self.model.base_model, transformers.GPT2Model):\n self.model_inputs = {'input_ids', 'attention_mask'}\n elif isinstance(self.model.base_model, transformers.BertModel):\n self.model_inputs = {'input_ids', 'attention_mask', 'token_type_ids'}\n else:\n assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'\n self.model_inputs = set(tokenizer.model_input_names)\n\n if self.config.vocab_size != len(tokenizer):\n # set model's word embedding matrix and final lm_head to vocab size according to tokenizer\n log.warning(\n f'The number of tokens in the tokenizer and the number of tokens in the model are different.'\n f' Resizing the model tokenizer to {len(tokenizer)} from {self.config.vocab_size}.')\n self.model.resize_token_embeddings(len(tokenizer))\n\n self.use_logits = use_logits\n\n self.train_metrics = None\n self.val_metrics = None\n\n if metrics:\n self.train_metrics = {metric.__class__.__name__: metric for metric in metrics}\n self.val_metrics = {metric.__class__.__name__: metric for metric in metrics}\n\n self.labels = None # set in eval_forward() if exists\n\n def forward(self, batch):\n for key in self.model_inputs:\n if key not in batch.keys():\n raise ValueError(f'Batch missing key: {key}')\n\n output = self.model(**batch) # type: ignore (thirdparty)\n return output\n\n def loss(self, outputs, batch):\n return outputs['loss']\n\n def eval_forward(self, batch, outputs: Optional[Any] = None):\n output = outputs if outputs else self.forward(batch)\n if self.use_logits:\n self.labels = batch.pop('labels')\n output = output['logits']\n\n # if we are in the single class case, then remove the classes dimension\n if output.shape[1] == 1:\n output = output.squeeze(dim=1)\n\n return output\n\n def 
get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:\n if is_train:\n metrics = self.train_metrics\n else:\n metrics = self.val_metrics\n\n return metrics if metrics else {}\n\n def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:\n metric.update(outputs, self.labels)\n\n def get_model_inputs(self):\n \"\"\"Returns a set of inputs that the model expects in the forward pass.\n If an algorithm wants to interact with the model inputs (for instance,\n popping the labels for a custom loss fn, or adding attention head masks\n for head pruning, it must access self.set_model_inputs().\n Returns:\n model_inputs: The set of keys that are expected in the Mapping used to compute the forward pass.\n \"\"\"\n\n return self.model_inputs\n", "path": "composer/models/huggingface.py"}]} | 1,711 | 281 |
gh_patches_debug_17906 | rasdani/github-patches | git_diff | openshift__openshift-ansible-8685 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix multimaster OpenStack deployment failure
When trying to deploy multimaster OpenStack with nsupdate and either no
load balancer or OpenStack LBaaS, the creation of the DNS records would
fail.
This is because it was still relying on the old `lb` inventory group for
all multimaster scenarios, rather than just in the
`openshift_openstack_use_lbaas_load_balancer` case.
So we define a new inventory variable
`openshift_openstack_private_api_ip` (which is already defined in the
Heat templates) and use that instead.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1588428
--- END ISSUE ---
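To make the described change concrete, a hypothetical helper for the dynamic inventory shown below could read the Heat stack output and publish it under the new variable name (a sketch only; `_get_stack_outputs` and `private_api_ip` come from the file below, and the real patch may differ):

```python
def private_api_vars(cloud):
    """Hypothetical helper: inventory vars for multimaster DNS without an 'lb' group."""
    stout = _get_stack_outputs(cloud)  # defined in playbooks/openstack/inventory.py
    if stout and 'private_api_ip' in stout:
        return {'openshift_openstack_private_api_ip': stout['private_api_ip']}
    return {}
```

`build_inventory()` could merge the result into `inventory['localhost']`, so the nsupdate records resolve the API VIP even when no load-balancer hosts exist.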
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `playbooks/openstack/inventory.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 This is an Ansible dynamic inventory for OpenStack.
4
5 It requires your OpenStack credentials to be set in clouds.yaml or your shell
6 environment.
7
8 """
9
10 from __future__ import print_function
11
12 from collections import Mapping
13 import json
14 import os
15
16 from keystoneauth1.exceptions.catalog import EndpointNotFound
17 import shade
18
19
20 def base_openshift_inventory(cluster_hosts):
21 '''Set the base openshift inventory.'''
22 inventory = {}
23
24 masters = [server.name for server in cluster_hosts
25 if server.metadata['host-type'] == 'master']
26
27 etcd = [server.name for server in cluster_hosts
28 if server.metadata['host-type'] == 'etcd']
29 if not etcd:
30 etcd = masters
31
32 infra_hosts = [server.name for server in cluster_hosts
33 if server.metadata['host-type'] == 'node' and
34 server.metadata['sub-host-type'] == 'infra']
35
36 app = [server.name for server in cluster_hosts
37 if server.metadata['host-type'] == 'node' and
38 server.metadata['sub-host-type'] == 'app']
39
40 cns = [server.name for server in cluster_hosts
41 if server.metadata['host-type'] == 'cns']
42
43 nodes = list(set(masters + infra_hosts + app + cns))
44
45 dns = [server.name for server in cluster_hosts
46 if server.metadata['host-type'] == 'dns']
47
48 load_balancers = [server.name for server in cluster_hosts
49 if server.metadata['host-type'] == 'lb']
50
51 osev3 = list(set(nodes + etcd + load_balancers))
52
53 inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
54 inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
55 inventory['masters'] = {'hosts': masters}
56 inventory['etcd'] = {'hosts': etcd}
57 inventory['nodes'] = {'hosts': nodes}
58 inventory['infra_hosts'] = {'hosts': infra_hosts}
59 inventory['app'] = {'hosts': app}
60 inventory['glusterfs'] = {'hosts': cns}
61 inventory['dns'] = {'hosts': dns}
62 inventory['lb'] = {'hosts': load_balancers}
63 inventory['localhost'] = {'ansible_connection': 'local'}
64
65 return inventory
66
67
68 def get_docker_storage_mountpoints(volumes):
69 '''Check volumes to see if they're being used for docker storage'''
70 docker_storage_mountpoints = {}
71 for volume in volumes:
72 if volume.metadata.get('purpose') == "openshift_docker_storage":
73 for attachment in volume.attachments:
74 if attachment.server_id in docker_storage_mountpoints:
75 docker_storage_mountpoints[attachment.server_id].append(attachment.device)
76 else:
77 docker_storage_mountpoints[attachment.server_id] = [attachment.device]
78 return docker_storage_mountpoints
79
80
81 def _get_hostvars(server, docker_storage_mountpoints):
82 ssh_ip_address = server.public_v4 or server.private_v4
83 hostvars = {
84 'ansible_host': ssh_ip_address
85 }
86
87 public_v4 = server.public_v4 or server.private_v4
88 if public_v4:
89 hostvars['public_v4'] = server.public_v4
90 hostvars['openshift_public_ip'] = server.public_v4
91 # TODO(shadower): what about multiple networks?
92 if server.private_v4:
93 hostvars['private_v4'] = server.private_v4
94 hostvars['openshift_ip'] = server.private_v4
95
96 # NOTE(shadower): Yes, we set both hostname and IP to the private
97 # IP address for each node. OpenStack doesn't resolve nodes by
98 # name at all, so using a hostname here would require an internal
99 # DNS which would complicate the setup and potentially introduce
100 # performance issues.
101 hostvars['openshift_hostname'] = server.metadata.get(
102 'openshift_hostname', server.private_v4)
103 hostvars['openshift_public_hostname'] = server.name
104
105 if server.metadata['host-type'] == 'cns':
106 hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
107
108 node_labels = server.metadata.get('node_labels')
109 # NOTE(shadower): the node_labels value must be a dict not string
110 if not isinstance(node_labels, Mapping):
111 node_labels = json.loads(node_labels)
112
113 if node_labels:
114 hostvars['openshift_node_labels'] = node_labels
115
116 # check for attached docker storage volumes
117 if 'os-extended-volumes:volumes_attached' in server:
118 if server.id in docker_storage_mountpoints:
119 hostvars['docker_storage_mountpoints'] = ' '.join(
120 docker_storage_mountpoints[server.id])
121 return hostvars
122
123
124 def build_inventory():
125 '''Build the dynamic inventory.'''
126 cloud = shade.openstack_cloud()
127
128 # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
129 # environment variable.
130 cluster_hosts = [
131 server for server in cloud.list_servers()
132 if 'metadata' in server and 'clusterid' in server.metadata]
133
134 inventory = base_openshift_inventory(cluster_hosts)
135
136 for server in cluster_hosts:
137 if 'group' in server.metadata:
138 group = server.metadata.get('group')
139 if group not in inventory:
140 inventory[group] = {'hosts': []}
141 inventory[group]['hosts'].append(server.name)
142
143 inventory['_meta'] = {'hostvars': {}}
144
145 # Some clouds don't have Cinder. That's okay:
146 try:
147 volumes = cloud.list_volumes()
148 except EndpointNotFound:
149 volumes = []
150
151 # cinder volumes used for docker storage
152 docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
153 for server in cluster_hosts:
154 inventory['_meta']['hostvars'][server.name] = _get_hostvars(
155 server,
156 docker_storage_mountpoints)
157
158 stout = _get_stack_outputs(cloud)
159 if stout is not None:
160 try:
161 inventory['localhost'].update({
162 'openshift_openstack_api_lb_provider':
163 stout['api_lb_provider'],
164 'openshift_openstack_api_lb_port_id':
165 stout['api_lb_vip_port_id'],
166 'openshift_openstack_api_lb_sg_id':
167 stout['api_lb_sg_id']})
168 except KeyError:
169 pass # Not an API load balanced deployment
170
171 try:
172 inventory['OSEv3']['vars'][
173 'openshift_master_cluster_hostname'] = stout['private_api_ip']
174 except KeyError:
175 pass # Internal LB not specified
176
177 inventory['localhost']['openshift_openstack_public_api_ip'] = \
178 stout.get('public_api_ip')
179 inventory['localhost']['openshift_openstack_public_router_ip'] = \
180 stout.get('public_router_ip')
181
182 try:
183 inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
184 except KeyError:
185 pass # Not a kuryr deployment
186 return inventory
187
188
189 def _get_stack_outputs(cloud_client):
190 """Returns a dictionary with the stack outputs"""
191 cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
192
193 stack = cloud_client.get_stack(cluster_name)
194 if stack is None or stack['stack_status'] not in (
195 'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
196 return None
197
198 data = {}
199 for output in stack['outputs']:
200 data[output['output_key']] = output['output_value']
201 return data
202
203
204 def _get_kuryr_vars(cloud_client, data):
205 """Returns a dictionary of Kuryr variables resulting of heat stacking"""
206 settings = {}
207 settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
208 settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
209 settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
210 settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
211 settings['kuryr_openstack_pod_project_id'] = (
212 cloud_client.current_project_id)
213 settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']
214
215 settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
216 settings['kuryr_openstack_username'] = cloud_client.auth['username']
217 settings['kuryr_openstack_password'] = cloud_client.auth['password']
218 if 'user_domain_id' in cloud_client.auth:
219 settings['kuryr_openstack_user_domain_name'] = (
220 cloud_client.auth['user_domain_id'])
221 else:
222 settings['kuryr_openstack_user_domain_name'] = (
223 cloud_client.auth['user_domain_name'])
224 # FIXME(apuimedo): consolidate kuryr controller credentials into the same
225 # vars the openstack playbook uses.
226 settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
227 if 'project_domain_id' in cloud_client.auth:
228 settings['kuryr_openstack_project_domain_name'] = (
229 cloud_client.auth['project_domain_id'])
230 else:
231 settings['kuryr_openstack_project_domain_name'] = (
232 cloud_client.auth['project_domain_name'])
233 return settings
234
235
236 if __name__ == '__main__':
237 print(json.dumps(build_inventory(), indent=4, sort_keys=True))
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -9,7 +9,6 @@
from __future__ import print_function
-from collections import Mapping
import json
import os
@@ -105,13 +104,8 @@
if server.metadata['host-type'] == 'cns':
hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
- node_labels = server.metadata.get('node_labels')
- # NOTE(shadower): the node_labels value must be a dict not string
- if not isinstance(node_labels, Mapping):
- node_labels = json.loads(node_labels)
-
- if node_labels:
- hostvars['openshift_node_labels'] = node_labels
+ group_name = server.metadata.get('openshift_node_group_name')
+ hostvars['openshift_node_group_name'] = group_name
# check for attached docker storage volumes
if 'os-extended-volumes:volumes_attached' in server:
| {"golden_diff": "diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py\n--- a/playbooks/openstack/inventory.py\n+++ b/playbooks/openstack/inventory.py\n@@ -9,7 +9,6 @@\n \n from __future__ import print_function\n \n-from collections import Mapping\n import json\n import os\n \n@@ -105,13 +104,8 @@\n if server.metadata['host-type'] == 'cns':\n hostvars['glusterfs_devices'] = ['/dev/nvme0n1']\n \n- node_labels = server.metadata.get('node_labels')\n- # NOTE(shadower): the node_labels value must be a dict not string\n- if not isinstance(node_labels, Mapping):\n- node_labels = json.loads(node_labels)\n-\n- if node_labels:\n- hostvars['openshift_node_labels'] = node_labels\n+ group_name = server.metadata.get('openshift_node_group_name')\n+ hostvars['openshift_node_group_name'] = group_name\n \n # check for attached docker storage volumes\n if 'os-extended-volumes:volumes_attached' in server:\n", "issue": "Fix multimaster OpenStack deployment failure\nWhen trying to deploy multimaster OpenStack with nsupdate and either no\r\nload balancer or OpenStack LBaaS, the creation of the DNS records would\r\nfail.\r\n\r\nThis is because it was still relying on the old `lb` inventory group for\r\nall multimaster scenarios, rather than just in the\r\n`openshift_openstack_use_lbaas_load_balancer` case.\r\n\r\nSo we define a new inventory variable\r\n`openshift_openstack_private_api_ip` (which is already defined in the\r\nHeat templates) and use that instead.\r\n\r\nFixes: https://bugzilla.redhat.com/show_bug.cgi?id=1588428\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis is an Ansible dynamic inventory for OpenStack.\n\nIt requires your OpenStack credentials to be set in clouds.yaml or your shell\nenvironment.\n\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom collections import Mapping\nimport json\nimport os\n\nfrom keystoneauth1.exceptions.catalog import EndpointNotFound\nimport shade\n\n\ndef base_openshift_inventory(cluster_hosts):\n '''Set the base openshift inventory.'''\n inventory = {}\n\n masters = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'master']\n\n etcd = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'etcd']\n if not etcd:\n etcd = masters\n\n infra_hosts = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'node' and\n server.metadata['sub-host-type'] == 'infra']\n\n app = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'node' and\n server.metadata['sub-host-type'] == 'app']\n\n cns = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'cns']\n\n nodes = list(set(masters + infra_hosts + app + cns))\n\n dns = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'dns']\n\n load_balancers = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'lb']\n\n osev3 = list(set(nodes + etcd + load_balancers))\n\n inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}\n inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}\n inventory['masters'] = {'hosts': masters}\n inventory['etcd'] = {'hosts': etcd}\n inventory['nodes'] = {'hosts': nodes}\n inventory['infra_hosts'] = {'hosts': infra_hosts}\n inventory['app'] = {'hosts': app}\n inventory['glusterfs'] = {'hosts': cns}\n inventory['dns'] = {'hosts': dns}\n inventory['lb'] = {'hosts': load_balancers}\n inventory['localhost'] = {'ansible_connection': 'local'}\n\n return inventory\n\n\ndef 
get_docker_storage_mountpoints(volumes):\n '''Check volumes to see if they're being used for docker storage'''\n docker_storage_mountpoints = {}\n for volume in volumes:\n if volume.metadata.get('purpose') == \"openshift_docker_storage\":\n for attachment in volume.attachments:\n if attachment.server_id in docker_storage_mountpoints:\n docker_storage_mountpoints[attachment.server_id].append(attachment.device)\n else:\n docker_storage_mountpoints[attachment.server_id] = [attachment.device]\n return docker_storage_mountpoints\n\n\ndef _get_hostvars(server, docker_storage_mountpoints):\n ssh_ip_address = server.public_v4 or server.private_v4\n hostvars = {\n 'ansible_host': ssh_ip_address\n }\n\n public_v4 = server.public_v4 or server.private_v4\n if public_v4:\n hostvars['public_v4'] = server.public_v4\n hostvars['openshift_public_ip'] = server.public_v4\n # TODO(shadower): what about multiple networks?\n if server.private_v4:\n hostvars['private_v4'] = server.private_v4\n hostvars['openshift_ip'] = server.private_v4\n\n # NOTE(shadower): Yes, we set both hostname and IP to the private\n # IP address for each node. OpenStack doesn't resolve nodes by\n # name at all, so using a hostname here would require an internal\n # DNS which would complicate the setup and potentially introduce\n # performance issues.\n hostvars['openshift_hostname'] = server.metadata.get(\n 'openshift_hostname', server.private_v4)\n hostvars['openshift_public_hostname'] = server.name\n\n if server.metadata['host-type'] == 'cns':\n hostvars['glusterfs_devices'] = ['/dev/nvme0n1']\n\n node_labels = server.metadata.get('node_labels')\n # NOTE(shadower): the node_labels value must be a dict not string\n if not isinstance(node_labels, Mapping):\n node_labels = json.loads(node_labels)\n\n if node_labels:\n hostvars['openshift_node_labels'] = node_labels\n\n # check for attached docker storage volumes\n if 'os-extended-volumes:volumes_attached' in server:\n if server.id in docker_storage_mountpoints:\n hostvars['docker_storage_mountpoints'] = ' '.join(\n docker_storage_mountpoints[server.id])\n return hostvars\n\n\ndef build_inventory():\n '''Build the dynamic inventory.'''\n cloud = shade.openstack_cloud()\n\n # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`\n # environment variable.\n cluster_hosts = [\n server for server in cloud.list_servers()\n if 'metadata' in server and 'clusterid' in server.metadata]\n\n inventory = base_openshift_inventory(cluster_hosts)\n\n for server in cluster_hosts:\n if 'group' in server.metadata:\n group = server.metadata.get('group')\n if group not in inventory:\n inventory[group] = {'hosts': []}\n inventory[group]['hosts'].append(server.name)\n\n inventory['_meta'] = {'hostvars': {}}\n\n # Some clouds don't have Cinder. 
That's okay:\n try:\n volumes = cloud.list_volumes()\n except EndpointNotFound:\n volumes = []\n\n # cinder volumes used for docker storage\n docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)\n for server in cluster_hosts:\n inventory['_meta']['hostvars'][server.name] = _get_hostvars(\n server,\n docker_storage_mountpoints)\n\n stout = _get_stack_outputs(cloud)\n if stout is not None:\n try:\n inventory['localhost'].update({\n 'openshift_openstack_api_lb_provider':\n stout['api_lb_provider'],\n 'openshift_openstack_api_lb_port_id':\n stout['api_lb_vip_port_id'],\n 'openshift_openstack_api_lb_sg_id':\n stout['api_lb_sg_id']})\n except KeyError:\n pass # Not an API load balanced deployment\n\n try:\n inventory['OSEv3']['vars'][\n 'openshift_master_cluster_hostname'] = stout['private_api_ip']\n except KeyError:\n pass # Internal LB not specified\n\n inventory['localhost']['openshift_openstack_public_api_ip'] = \\\n stout.get('public_api_ip')\n inventory['localhost']['openshift_openstack_public_router_ip'] = \\\n stout.get('public_router_ip')\n\n try:\n inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)\n except KeyError:\n pass # Not a kuryr deployment\n return inventory\n\n\ndef _get_stack_outputs(cloud_client):\n \"\"\"Returns a dictionary with the stack outputs\"\"\"\n cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')\n\n stack = cloud_client.get_stack(cluster_name)\n if stack is None or stack['stack_status'] not in (\n 'CREATE_COMPLETE', 'UPDATE_COMPLETE'):\n return None\n\n data = {}\n for output in stack['outputs']:\n data[output['output_key']] = output['output_value']\n return data\n\n\ndef _get_kuryr_vars(cloud_client, data):\n \"\"\"Returns a dictionary of Kuryr variables resulting of heat stacking\"\"\"\n settings = {}\n settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']\n settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']\n settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']\n settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']\n settings['kuryr_openstack_pod_project_id'] = (\n cloud_client.current_project_id)\n settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']\n\n settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']\n settings['kuryr_openstack_username'] = cloud_client.auth['username']\n settings['kuryr_openstack_password'] = cloud_client.auth['password']\n if 'user_domain_id' in cloud_client.auth:\n settings['kuryr_openstack_user_domain_name'] = (\n cloud_client.auth['user_domain_id'])\n else:\n settings['kuryr_openstack_user_domain_name'] = (\n cloud_client.auth['user_domain_name'])\n # FIXME(apuimedo): consolidate kuryr controller credentials into the same\n # vars the openstack playbook uses.\n settings['kuryr_openstack_project_id'] = cloud_client.current_project_id\n if 'project_domain_id' in cloud_client.auth:\n settings['kuryr_openstack_project_domain_name'] = (\n cloud_client.auth['project_domain_id'])\n else:\n settings['kuryr_openstack_project_domain_name'] = (\n cloud_client.auth['project_domain_name'])\n return settings\n\n\nif __name__ == '__main__':\n print(json.dumps(build_inventory(), indent=4, sort_keys=True))\n", "path": "playbooks/openstack/inventory.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis is an Ansible dynamic inventory for OpenStack.\n\nIt requires your OpenStack credentials to be set in clouds.yaml or your shell\nenvironment.\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport 
json\nimport os\n\nfrom keystoneauth1.exceptions.catalog import EndpointNotFound\nimport shade\n\n\ndef base_openshift_inventory(cluster_hosts):\n '''Set the base openshift inventory.'''\n inventory = {}\n\n masters = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'master']\n\n etcd = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'etcd']\n if not etcd:\n etcd = masters\n\n infra_hosts = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'node' and\n server.metadata['sub-host-type'] == 'infra']\n\n app = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'node' and\n server.metadata['sub-host-type'] == 'app']\n\n cns = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'cns']\n\n nodes = list(set(masters + infra_hosts + app + cns))\n\n dns = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'dns']\n\n load_balancers = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'lb']\n\n osev3 = list(set(nodes + etcd + load_balancers))\n\n inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}\n inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}\n inventory['masters'] = {'hosts': masters}\n inventory['etcd'] = {'hosts': etcd}\n inventory['nodes'] = {'hosts': nodes}\n inventory['infra_hosts'] = {'hosts': infra_hosts}\n inventory['app'] = {'hosts': app}\n inventory['glusterfs'] = {'hosts': cns}\n inventory['dns'] = {'hosts': dns}\n inventory['lb'] = {'hosts': load_balancers}\n inventory['localhost'] = {'ansible_connection': 'local'}\n\n return inventory\n\n\ndef get_docker_storage_mountpoints(volumes):\n '''Check volumes to see if they're being used for docker storage'''\n docker_storage_mountpoints = {}\n for volume in volumes:\n if volume.metadata.get('purpose') == \"openshift_docker_storage\":\n for attachment in volume.attachments:\n if attachment.server_id in docker_storage_mountpoints:\n docker_storage_mountpoints[attachment.server_id].append(attachment.device)\n else:\n docker_storage_mountpoints[attachment.server_id] = [attachment.device]\n return docker_storage_mountpoints\n\n\ndef _get_hostvars(server, docker_storage_mountpoints):\n ssh_ip_address = server.public_v4 or server.private_v4\n hostvars = {\n 'ansible_host': ssh_ip_address\n }\n\n public_v4 = server.public_v4 or server.private_v4\n if public_v4:\n hostvars['public_v4'] = server.public_v4\n hostvars['openshift_public_ip'] = server.public_v4\n # TODO(shadower): what about multiple networks?\n if server.private_v4:\n hostvars['private_v4'] = server.private_v4\n hostvars['openshift_ip'] = server.private_v4\n\n # NOTE(shadower): Yes, we set both hostname and IP to the private\n # IP address for each node. 
OpenStack doesn't resolve nodes by\n # name at all, so using a hostname here would require an internal\n # DNS which would complicate the setup and potentially introduce\n # performance issues.\n hostvars['openshift_hostname'] = server.metadata.get(\n 'openshift_hostname', server.private_v4)\n hostvars['openshift_public_hostname'] = server.name\n\n if server.metadata['host-type'] == 'cns':\n hostvars['glusterfs_devices'] = ['/dev/nvme0n1']\n\n group_name = server.metadata.get('openshift_node_group_name')\n hostvars['openshift_node_group_name'] = group_name\n\n # check for attached docker storage volumes\n if 'os-extended-volumes:volumes_attached' in server:\n if server.id in docker_storage_mountpoints:\n hostvars['docker_storage_mountpoints'] = ' '.join(\n docker_storage_mountpoints[server.id])\n return hostvars\n\n\ndef build_inventory():\n '''Build the dynamic inventory.'''\n cloud = shade.openstack_cloud()\n\n # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`\n # environment variable.\n cluster_hosts = [\n server for server in cloud.list_servers()\n if 'metadata' in server and 'clusterid' in server.metadata]\n\n inventory = base_openshift_inventory(cluster_hosts)\n\n for server in cluster_hosts:\n if 'group' in server.metadata:\n group = server.metadata.get('group')\n if group not in inventory:\n inventory[group] = {'hosts': []}\n inventory[group]['hosts'].append(server.name)\n\n inventory['_meta'] = {'hostvars': {}}\n\n # Some clouds don't have Cinder. That's okay:\n try:\n volumes = cloud.list_volumes()\n except EndpointNotFound:\n volumes = []\n\n # cinder volumes used for docker storage\n docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)\n for server in cluster_hosts:\n inventory['_meta']['hostvars'][server.name] = _get_hostvars(\n server,\n docker_storage_mountpoints)\n\n stout = _get_stack_outputs(cloud)\n if stout is not None:\n try:\n inventory['localhost'].update({\n 'openshift_openstack_api_lb_provider':\n stout['api_lb_provider'],\n 'openshift_openstack_api_lb_port_id':\n stout['api_lb_vip_port_id'],\n 'openshift_openstack_api_lb_sg_id':\n stout['api_lb_sg_id']})\n except KeyError:\n pass # Not an API load balanced deployment\n\n try:\n inventory['OSEv3']['vars'][\n 'openshift_master_cluster_hostname'] = stout['private_api_ip']\n except KeyError:\n pass # Internal LB not specified\n\n inventory['localhost']['openshift_openstack_public_api_ip'] = \\\n stout.get('public_api_ip')\n inventory['localhost']['openshift_openstack_public_router_ip'] = \\\n stout.get('public_router_ip')\n\n try:\n inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)\n except KeyError:\n pass # Not a kuryr deployment\n return inventory\n\n\ndef _get_stack_outputs(cloud_client):\n \"\"\"Returns a dictionary with the stack outputs\"\"\"\n cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')\n\n stack = cloud_client.get_stack(cluster_name)\n if stack is None or stack['stack_status'] not in (\n 'CREATE_COMPLETE', 'UPDATE_COMPLETE'):\n return None\n\n data = {}\n for output in stack['outputs']:\n data[output['output_key']] = output['output_value']\n return data\n\n\ndef _get_kuryr_vars(cloud_client, data):\n \"\"\"Returns a dictionary of Kuryr variables resulting of heat stacking\"\"\"\n settings = {}\n settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']\n settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']\n settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']\n settings['kuryr_openstack_pod_sg_id'] = 
data['pod_access_sg_id']\n settings['kuryr_openstack_pod_project_id'] = (\n cloud_client.current_project_id)\n settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']\n\n settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']\n settings['kuryr_openstack_username'] = cloud_client.auth['username']\n settings['kuryr_openstack_password'] = cloud_client.auth['password']\n if 'user_domain_id' in cloud_client.auth:\n settings['kuryr_openstack_user_domain_name'] = (\n cloud_client.auth['user_domain_id'])\n else:\n settings['kuryr_openstack_user_domain_name'] = (\n cloud_client.auth['user_domain_name'])\n # FIXME(apuimedo): consolidate kuryr controller credentials into the same\n # vars the openstack playbook uses.\n settings['kuryr_openstack_project_id'] = cloud_client.current_project_id\n if 'project_domain_id' in cloud_client.auth:\n settings['kuryr_openstack_project_domain_name'] = (\n cloud_client.auth['project_domain_id'])\n else:\n settings['kuryr_openstack_project_domain_name'] = (\n cloud_client.auth['project_domain_name'])\n return settings\n\n\nif __name__ == '__main__':\n print(json.dumps(build_inventory(), indent=4, sort_keys=True))\n", "path": "playbooks/openstack/inventory.py"}]} | 3,035 | 244 |
gh_patches_debug_16540 | rasdani/github-patches | git_diff | Kinto__kinto-1343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`kinto create-user` doesn't override the password if the user already exists.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/accounts/scripts.py`
Content:
```
1 import logging
2 import getpass
3
4 import transaction as current_transaction
5 from pyramid.settings import asbool
6
7 from .utils import hash_password
8 from .views import AccountIdGenerator
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def create_user(env, username=None, password=None):
15 """Administrative command to create a new user."""
16 registry = env['registry']
17 settings = registry.settings
18 readonly_mode = asbool(settings.get('readonly', False))
19 if readonly_mode:
20 message = 'Cannot create a user with a readonly server.'
21 logger.error(message)
22 return 51
23
24 if 'kinto.plugins.accounts' not in settings['includes']:
25 message = 'Cannot create a user when the accounts plugin is not installed.'
26 logger.error(message)
27 return 52
28
29 try:
30 validator = AccountIdGenerator()
31 if username is None:
32 username = input('Username: ')
33 while not validator.match(username):
34 print('{} is not a valid username.')
35 print('Username should match {0!r}, please try again.'.format(validator.regexp))
36 username = input('Username: ')
37
38 if password is None:
39 while True: # The user didn't entered twice the same password
40 password = getpass.getpass('Please enter a password for {}: '.format(username))
41 confirm = getpass.getpass('Please confirm the password: '.format(username))
42
43 if password != confirm:
44 print('Sorry, passwords do not match, please try again.')
45 else:
46 break
47 except EOFError:
48 print('User creation aborted')
49 return 53
50
51 print("Creating user '{}'".format(username))
52 record = {'id': username, 'password': hash_password(password)}
53 registry.storage.create(collection_id='account',
54 parent_id=username,
55 record=record,
56 ignore_conflict=True)
57 registry.permission.add_principal_to_ace('/accounts/{}'.format(username),
58 'write',
59 'account:{}'.format(username))
60
61 current_transaction.commit()
62
63 return 0
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py
--- a/kinto/plugins/accounts/scripts.py
+++ b/kinto/plugins/accounts/scripts.py
@@ -50,10 +50,10 @@
print("Creating user '{}'".format(username))
record = {'id': username, 'password': hash_password(password)}
- registry.storage.create(collection_id='account',
+ registry.storage.update(collection_id='account',
parent_id=username,
- record=record,
- ignore_conflict=True)
+ object_id=username,
+ record=record)
registry.permission.add_principal_to_ace('/accounts/{}'.format(username),
'write',
'account:{}'.format(username))
| {"golden_diff": "diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py\n--- a/kinto/plugins/accounts/scripts.py\n+++ b/kinto/plugins/accounts/scripts.py\n@@ -50,10 +50,10 @@\n \n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n- registry.storage.create(collection_id='account',\n+ registry.storage.update(collection_id='account',\n parent_id=username,\n- record=record,\n- ignore_conflict=True)\n+ object_id=username,\n+ record=record)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n", "issue": "`kinto create-user` doesn't override the password if the user already exists.\n\n", "before_files": [{"content": "import logging\nimport getpass\n\nimport transaction as current_transaction\nfrom pyramid.settings import asbool\n\nfrom .utils import hash_password\nfrom .views import AccountIdGenerator\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_user(env, username=None, password=None):\n \"\"\"Administrative command to create a new user.\"\"\"\n registry = env['registry']\n settings = registry.settings\n readonly_mode = asbool(settings.get('readonly', False))\n if readonly_mode:\n message = 'Cannot create a user with a readonly server.'\n logger.error(message)\n return 51\n\n if 'kinto.plugins.accounts' not in settings['includes']:\n message = 'Cannot create a user when the accounts plugin is not installed.'\n logger.error(message)\n return 52\n\n try:\n validator = AccountIdGenerator()\n if username is None:\n username = input('Username: ')\n while not validator.match(username):\n print('{} is not a valid username.')\n print('Username should match {0!r}, please try again.'.format(validator.regexp))\n username = input('Username: ')\n\n if password is None:\n while True: # The user didn't entered twice the same password\n password = getpass.getpass('Please enter a password for {}: '.format(username))\n confirm = getpass.getpass('Please confirm the password: '.format(username))\n\n if password != confirm:\n print('Sorry, passwords do not match, please try again.')\n else:\n break\n except EOFError:\n print('User creation aborted')\n return 53\n\n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n registry.storage.create(collection_id='account',\n parent_id=username,\n record=record,\n ignore_conflict=True)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n\n current_transaction.commit()\n\n return 0\n", "path": "kinto/plugins/accounts/scripts.py"}], "after_files": [{"content": "import logging\nimport getpass\n\nimport transaction as current_transaction\nfrom pyramid.settings import asbool\n\nfrom .utils import hash_password\nfrom .views import AccountIdGenerator\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_user(env, username=None, password=None):\n \"\"\"Administrative command to create a new user.\"\"\"\n registry = env['registry']\n settings = registry.settings\n readonly_mode = asbool(settings.get('readonly', False))\n if readonly_mode:\n message = 'Cannot create a user with a readonly server.'\n logger.error(message)\n return 51\n\n if 'kinto.plugins.accounts' not in settings['includes']:\n message = 'Cannot create a user when the accounts plugin is not installed.'\n logger.error(message)\n return 52\n\n try:\n validator = AccountIdGenerator()\n if username is None:\n username = 
input('Username: ')\n while not validator.match(username):\n print('{} is not a valid username.')\n print('Username should match {0!r}, please try again.'.format(validator.regexp))\n username = input('Username: ')\n\n if password is None:\n while True: # The user didn't entered twice the same password\n password = getpass.getpass('Please enter a password for {}: '.format(username))\n confirm = getpass.getpass('Please confirm the password: '.format(username))\n\n if password != confirm:\n print('Sorry, passwords do not match, please try again.')\n else:\n break\n except EOFError:\n print('User creation aborted')\n return 53\n\n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n registry.storage.update(collection_id='account',\n parent_id=username,\n object_id=username,\n record=record)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n\n current_transaction.commit()\n\n return 0\n", "path": "kinto/plugins/accounts/scripts.py"}]} | 826 | 153 |
gh_patches_debug_61213 | rasdani/github-patches | git_diff | scikit-hep__pyhf-483 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TensorFlow einsum behavior change
# Description
In the test suite the `test_einsum[tensorflow]` [test is failing](https://travis-ci.org/diana-hep/pyhf/jobs/548493214#L689-L714) for `tensorflow` `v1.14.0`.
# Expected Behavior
`test_einsum[tensorflow]` passes
# Actual Behavior
```
backend = (<pyhf.tensor.tensorflow_backend.tensorflow_backend object at 0x7f11de50be10>, None)
    def test_einsum(backend):
        tb = pyhf.tensorlib
        x = np.arange(20).reshape(5, 4).tolist()
        if isinstance(pyhf.tensorlib, pyhf.tensor.mxnet_backend):
            with pytest.raises(NotImplementedError):
                assert tb.einsum('ij->ji', [1, 2, 3])
        else:
>           assert np.all(tb.tolist(tb.einsum('ij->ji', x)) == np.asarray(x).T.tolist())
tests/test_tensor.py:200:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
pyhf/tensor/tensorflow_backend.py:260: in einsum
return tf.einsum(subscripts, *operands)
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow/python/ops/special_math_ops.py:255: in einsum
input_shapes = [x.get_shape() for x in inputs]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.0 = <list_iterator object at 0x7f11cc06bb38>
> input_shapes = [x.get_shape() for x in inputs]
E AttributeError: 'list' object has no attribute 'get_shape'
```
# Steps to Reproduce
Run the test suite.
```
pytest -s tests/test_tensor.py
```
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4 from os import path
5 import sys
6
7 this_directory = path.abspath(path.dirname(__file__))
8 if sys.version_info.major < 3:
9 from io import open
10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
11 long_description = readme_md.read()
12
13 extras_require = {
14 'tensorflow': [
15 'tensorflow~=1.13',
16 'tensorflow-probability~=0.5',
17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
18 'setuptools<=39.1.0',
19 ],
20 'torch': ['torch~=1.0'],
21 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],
22 # 'dask': [
23 # 'dask[array]'
24 # ],
25 'xmlio': ['uproot'],
26 'minuit': ['iminuit'],
27 'develop': [
28 'pyflakes',
29 'pytest~=3.5',
30 'pytest-cov>=2.5.1',
31 'pytest-mock',
32 'pytest-benchmark[histogram]',
33 'pytest-console-scripts',
34 'python-coveralls',
35 'coverage>=4.0', # coveralls
36 'matplotlib',
37 'jupyter',
38 'nbdime',
39 'uproot~=3.3',
40 'papermill~=1.0',
41 'nteract-scrapbook~=0.2',
42 'graphviz',
43 'bumpversion',
44 'sphinx',
45 'sphinxcontrib-bibtex',
46 'sphinxcontrib-napoleon',
47 'sphinx_rtd_theme',
48 'nbsphinx',
49 'sphinx-issues',
50 'm2r',
51 'jsonpatch',
52 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
53 'pre-commit',
54 'black;python_version>="3.6"', # Black is Python3 only
55 'twine',
56 ],
57 }
58 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
59
60
61 def _is_test_pypi():
62 """
63 Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and
64 set to true (c.f. .travis.yml)
65
66 The use_scm_version kwarg accepts a callable for the local_scheme
67 configuration parameter with argument "version". This can be replaced
68 with a lambda as the desired version structure is {next_version}.dev{distance}
69 c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy
70
71 As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version
72 controlled through bumpversion is used.
73 """
74 from os import getenv
75
76 return (
77 {'local_scheme': lambda version: ''}
78 if getenv('TESTPYPI_UPLOAD') == 'true'
79 else False
80 )
81
82
83 setup(
84 name='pyhf',
85 version='0.1.0',
86 description='(partial) pure python histfactory implementation',
87 long_description=long_description,
88 long_description_content_type='text/markdown',
89 url='https://github.com/diana-hep/pyhf',
90 author='Lukas Heinrich',
91 author_email='[email protected]',
92 license='Apache',
93 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
94 classifiers=[
95 "Programming Language :: Python :: 2",
96 "Programming Language :: Python :: 2.7",
97 "Programming Language :: Python :: 3",
98 "Programming Language :: Python :: 3.6",
99 "Programming Language :: Python :: 3.7",
100 ],
101 packages=find_packages(),
102 include_package_data=True,
103 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
104 install_requires=[
105 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
106 'click>=6.0', # for console scripts,
107 'tqdm', # for readxml
108 'six', # for modifiers
109 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
110 'jsonpatch',
111 'pyyaml', # for parsing CLI equal-delimited options
112 ],
113 extras_require=extras_require,
114 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
115 dependency_links=[],
116 use_scm_version=_is_test_pypi(),
117 )
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
extras_require = {
'tensorflow': [
- 'tensorflow~=1.13',
+ 'tensorflow~=1.14',
'tensorflow-probability~=0.5',
'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
'setuptools<=39.1.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n \n extras_require = {\n 'tensorflow': [\n- 'tensorflow~=1.13',\n+ 'tensorflow~=1.14',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n", "issue": "TensorFlow einsum behavior change\n# Description\r\n\r\nIn the test suite the `test_einsum[tensorflow]` [test is failing](https://travis-ci.org/diana-hep/pyhf/jobs/548493214#L689-L714) for `tensorflow` `v1.14.0`.\r\n\r\n# Expected Behavior\r\n\r\n`test_einsum[tensorflow]` passes\r\n\r\n# Actual Behavior\r\n\r\n```\r\nbackend = (<pyhf.tensor.tensorflow_backend.tensorflow_backend object at 0x7f11de50be10>, None)\r\n def test_einsum(backend):\r\n tb = pyhf.tensorlib\r\n x = np.arange(20).reshape(5, 4).tolist()\r\n \r\n if isinstance(pyhf.tensorlib, pyhf.tensor.mxnet_backend):\r\n with pytest.raises(NotImplementedError):\r\n assert tb.einsum('ij->ji', [1, 2, 3])\r\n else:\r\n> assert np.all(tb.tolist(tb.einsum('ij->ji', x)) == np.asarray(x).T.tolist())\r\ntests/test_tensor.py:200: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\npyhf/tensor/tensorflow_backend.py:260: in einsum\r\n return tf.einsum(subscripts, *operands)\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow/python/ops/special_math_ops.py:255: in einsum\r\n input_shapes = [x.get_shape() for x in inputs]\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n.0 = <list_iterator object at 0x7f11cc06bb38>\r\n> input_shapes = [x.get_shape() for x in inputs]\r\nE AttributeError: 'list' object has no attribute 'get_shape'\r\n```\r\n\r\n# Steps to Reproduce\r\n\r\nRun the test suite.\r\n\r\n```\r\npytest -s tests/test_tensor.py\r\n```\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade 
ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.1.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.14',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 
'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.1.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}]} | 2,137 | 130 |
gh_patches_debug_22380 | rasdani/github-patches | git_diff | getsentry__sentry-python-355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.7.10 CeleryIntegration captures task Ignore exception
Similar to #252, I've noticed that the Sentry client with the CeleryIntegration is capturing task Ignore exceptions, which are often used with dynamic tasks (see also https://github.com/celery/celery/issues/3437). I believe that since Retries are ignored, the `Ignore` exception should also be ignored.
The exception in Sentry is showing `celery/app/task.py in replace at line 894`:
```python
raise Ignore('Replaced by new task')
```
celery: 4.3
sentry-sdk: 0.7.10
python: 3.6
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/celery.py`
Content:
```
1 from __future__ import absolute_import
2
3 import sys
4
5 from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore
6
7 from sentry_sdk.hub import Hub
8 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
9 from sentry_sdk._compat import reraise
10 from sentry_sdk.integrations import Integration
11 from sentry_sdk.integrations.logging import ignore_logger
12
13
14 class CeleryIntegration(Integration):
15 identifier = "celery"
16
17 @staticmethod
18 def setup_once():
19 import celery.app.trace as trace # type: ignore
20
21 old_build_tracer = trace.build_tracer
22
23 def sentry_build_tracer(name, task, *args, **kwargs):
24 # Need to patch both methods because older celery sometimes
25 # short-circuits to task.run if it thinks it's safe.
26 task.__call__ = _wrap_task_call(task, task.__call__)
27 task.run = _wrap_task_call(task, task.run)
28 return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
29
30 trace.build_tracer = sentry_build_tracer
31
32 _patch_worker_exit()
33
34 # This logger logs every status of every task that ran on the worker.
35 # Meaning that every task's breadcrumbs are full of stuff like "Task
36 # <foo> raised unexpected <bar>".
37 ignore_logger("celery.worker.job")
38
39
40 def _wrap_tracer(task, f):
41 # Need to wrap tracer for pushing the scope before prerun is sent, and
42 # popping it after postrun is sent.
43 #
44 # This is the reason we don't use signals for hooking in the first place.
45 # Also because in Celery 3, signal dispatch returns early if one handler
46 # crashes.
47 def _inner(*args, **kwargs):
48 hub = Hub.current
49 if hub.get_integration(CeleryIntegration) is None:
50 return f(*args, **kwargs)
51
52 with hub.push_scope() as scope:
53 scope._name = "celery"
54 scope.clear_breadcrumbs()
55 scope.add_event_processor(_make_event_processor(task, *args, **kwargs))
56
57 return f(*args, **kwargs)
58
59 return _inner
60
61
62 def _wrap_task_call(task, f):
63 # Need to wrap task call because the exception is caught before we get to
64 # see it. Also celery's reported stacktrace is untrustworthy.
65 def _inner(*args, **kwargs):
66 try:
67 return f(*args, **kwargs)
68 except Exception:
69 exc_info = sys.exc_info()
70 with capture_internal_exceptions():
71 _capture_exception(task, exc_info)
72 reraise(*exc_info)
73
74 return _inner
75
76
77 def _make_event_processor(task, uuid, args, kwargs, request=None):
78 def event_processor(event, hint):
79 with capture_internal_exceptions():
80 event["transaction"] = task.name
81
82 with capture_internal_exceptions():
83 extra = event.setdefault("extra", {})
84 extra["celery-job"] = {
85 "task_name": task.name,
86 "args": args,
87 "kwargs": kwargs,
88 }
89
90 if "exc_info" in hint:
91 with capture_internal_exceptions():
92 if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
93 event["fingerprint"] = [
94 "celery",
95 "SoftTimeLimitExceeded",
96 getattr(task, "name", task),
97 ]
98
99 return event
100
101 return event_processor
102
103
104 def _capture_exception(task, exc_info):
105 hub = Hub.current
106
107 if hub.get_integration(CeleryIntegration) is None:
108 return
109 if isinstance(exc_info[1], Retry):
110 return
111 if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
112 return
113
114 event, hint = event_from_exception(
115 exc_info,
116 client_options=hub.client.options,
117 mechanism={"type": "celery", "handled": False},
118 )
119
120 hub.capture_event(event, hint=hint)
121
122
123 def _patch_worker_exit():
124 # Need to flush queue before worker shutdown because a crashing worker will
125 # call os._exit
126 from billiard.pool import Worker # type: ignore
127
128 old_workloop = Worker.workloop
129
130 def sentry_workloop(*args, **kwargs):
131 try:
132 return old_workloop(*args, **kwargs)
133 finally:
134 with capture_internal_exceptions():
135 hub = Hub.current
136 if hub.get_integration(CeleryIntegration) is not None:
137 hub.flush()
138
139 Worker.workloop = sentry_workloop
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py
--- a/sentry_sdk/integrations/celery.py
+++ b/sentry_sdk/integrations/celery.py
@@ -2,7 +2,12 @@
import sys
-from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore
+from celery.exceptions import ( # type: ignore
+ SoftTimeLimitExceeded,
+ Retry,
+ Ignore,
+ Reject,
+)
from sentry_sdk.hub import Hub
from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
@@ -11,6 +16,9 @@
from sentry_sdk.integrations.logging import ignore_logger
+CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)
+
+
class CeleryIntegration(Integration):
identifier = "celery"
@@ -106,7 +114,7 @@
if hub.get_integration(CeleryIntegration) is None:
return
- if isinstance(exc_info[1], Retry):
+ if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):
return
if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
return
| {"golden_diff": "diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py\n--- a/sentry_sdk/integrations/celery.py\n+++ b/sentry_sdk/integrations/celery.py\n@@ -2,7 +2,12 @@\n \n import sys\n \n-from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore\n+from celery.exceptions import ( # type: ignore\n+ SoftTimeLimitExceeded,\n+ Retry,\n+ Ignore,\n+ Reject,\n+)\n \n from sentry_sdk.hub import Hub\n from sentry_sdk.utils import capture_internal_exceptions, event_from_exception\n@@ -11,6 +16,9 @@\n from sentry_sdk.integrations.logging import ignore_logger\n \n \n+CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)\n+\n+\n class CeleryIntegration(Integration):\n identifier = \"celery\"\n \n@@ -106,7 +114,7 @@\n \n if hub.get_integration(CeleryIntegration) is None:\n return\n- if isinstance(exc_info[1], Retry):\n+ if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n", "issue": "0.7.10 CeleryIntegration captures task Ignore exception\nSimilar to #252, I've noticed that the Sentry client with the CeleryIntegration is capturing task Ignore exceptions, which are often used with dynamic tasks (see also https://github.com/celery/celery/issues/3437). I believe that since Retries are ignored, the `Ignore` exception should also be ignored.\r\n\r\nThe exception in Sentry is showing `celery/app/task.py in replace at line 894`:\r\n```python\r\nraise Ignore('Replaced by new task')\r\n```\r\n\r\ncelery: 4.3\r\nsentry-sdk: 0.7.10\r\npython: 3.6\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n import celery.app.trace as trace # type: ignore\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task, task.__call__)\n task.run = _wrap_task_call(task, task.run)\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n _patch_worker_exit()\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(task, 
f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n exc_info = sys.exc_info()\n with capture_internal_exceptions():\n _capture_exception(task, exc_info)\n reraise(*exc_info)\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n event[\"transaction\"] = task.name\n\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception(task, exc_info):\n hub = Hub.current\n\n if hub.get_integration(CeleryIntegration) is None:\n return\n if isinstance(exc_info[1], Retry):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _patch_worker_exit():\n # Need to flush queue before worker shutdown because a crashing worker will\n # call os._exit\n from billiard.pool import Worker # type: ignore\n\n old_workloop = Worker.workloop\n\n def sentry_workloop(*args, **kwargs):\n try:\n return old_workloop(*args, **kwargs)\n finally:\n with capture_internal_exceptions():\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n hub.flush()\n\n Worker.workloop = sentry_workloop\n", "path": "sentry_sdk/integrations/celery.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.exceptions import ( # type: ignore\n SoftTimeLimitExceeded,\n Retry,\n Ignore,\n Reject,\n)\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nCELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n import celery.app.trace as trace # type: ignore\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task, task.__call__)\n task.run = _wrap_task_call(task, task.run)\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n _patch_worker_exit()\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in 
the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(task, f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n exc_info = sys.exc_info()\n with capture_internal_exceptions():\n _capture_exception(task, exc_info)\n reraise(*exc_info)\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n event[\"transaction\"] = task.name\n\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception(task, exc_info):\n hub = Hub.current\n\n if hub.get_integration(CeleryIntegration) is None:\n return\n if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _patch_worker_exit():\n # Need to flush queue before worker shutdown because a crashing worker will\n # call os._exit\n from billiard.pool import Worker # type: ignore\n\n old_workloop = Worker.workloop\n\n def sentry_workloop(*args, **kwargs):\n try:\n return old_workloop(*args, **kwargs)\n finally:\n with capture_internal_exceptions():\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n hub.flush()\n\n Worker.workloop = sentry_workloop\n", "path": "sentry_sdk/integrations/celery.py"}]} | 1,725 | 282 |
gh_patches_debug_52038 | rasdani/github-patches | git_diff | rasterio__rasterio-926 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow type annotation support for rasterio data types
Recently I've been adding PEP 484 type annotations to my projects and rasterio has proved quite ugly to use as is, the main annoyance being that `rasterio.open` may return objects belonging to three classes (`DatasetReader`, `DatasetWriter` and `BufferedDatasetWriter`) with a non-public base `DatasetBase`, so right now the only option to annotate a generic dataset is:
```
from typing import Union
from rasterio import io
def myfunc(
    ds: Union[io.DatasetReader, io.DatasetWriter, io.BufferedDatasetWriter]
) -> None:
    pass
```
Something like exposing the DatasetBase class inside rasterio would make the above much more readable:
```
import rasterio
def myfunc(ds: rasterio.DatasetBase) -> None:
    pass
```
Would you consider restructuring a bit the dataset classes so that they are easier to use in type annotations? I guess I'm the first one having such a need :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/io.py`
Content:
```
1 """Classes capable of reading and writing datasets
2
3 Instances of these classes are called dataset objects.
4 """
5
6 import logging
7 import math
8 import uuid
9 import warnings
10
11 from rasterio._base import (
12 get_dataset_driver, driver_can_create, driver_can_create_copy)
13 from rasterio._io import (
14 DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,
15 MemoryFileBase)
16 from rasterio import enums, windows
17 from rasterio.env import Env
18 from rasterio.transform import guard_transform, xy, rowcol
19
20
21 log = logging.getLogger(__name__)
22
23
24 class TransformMethodsMixin(object):
25 """Mixin providing methods for calculations related
26 to transforming between rows and columns of the raster
27 array and the coordinates.
28
29 These methods are wrappers for the functionality in
30 `rasterio.transform` module.
31
32 A subclass with this mixin MUST provide a `transform`
33 property.
34 """
35
36 def xy(self, row, col, offset="center"):
37 """Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.
38 The pixel's center is returned by default, but a corner can be returned
39 by setting `offset` to one of `ul, ur, ll, lr`.
40
41 Parameters
42 ----------
43 row : int
44 Pixel row.
45 col : int
46 Pixel column.
47 offset : str, optional
48 Determines if the returned coordinates are for the center of the
49 pixel or for a corner.
50
51 Returns
52 -------
53 tuple
54 ``(x, y)``
55 """
56 return xy(self.transform, row, col, offset=offset)
57
58 def ul(self, row, col):
59 """Returns the coordinates (x, y) of the upper left corner of a
60 pixel at `row` and `col` in the units of the dataset's
61 coordinate reference system.
62
63 Deprecated; Use `xy(row, col, offset='ul')` instead.
64 """
65 warnings.warn("ul method is deprecated. Use xy(row, col, offset='ul')",
66 DeprecationWarning)
67 return xy(self.transform, row, col, offset='ul')
68
69 def index(self, x, y, op=math.floor, precision=6):
70 """
71 Returns the (row, col) index of the pixel containing (x, y) given a
72 coordinate reference system.
73
74 Use an epsilon, magnitude determined by the precision parameter
75 and sign determined by the op function:
76 positive for floor, negative for ceil.
77
78 Parameters
79 ----------
80 x : float
81 x value in coordinate reference system
82 y : float
83 y value in coordinate reference system
84 op : function, optional (default: math.floor)
85 Function to convert fractional pixels to whole numbers (floor,
86 ceiling, round)
87 precision : int, optional (default: 6)
88 Decimal places of precision in indexing, as in `round()`.
89
90 Returns
91 -------
92 tuple
93 (row index, col index)
94 """
95 return rowcol(self.transform, x, y, op=op, precision=precision)
96
97
98 class WindowMethodsMixin(object):
99 """Mixin providing methods for window-related calculations.
100 These methods are wrappers for the functionality in
101 `rasterio.windows` module.
102
103 A subclass with this mixin MUST provide the following
104 properties: `transform`, `height` and `width`
105 """
106
107 def window(self, left, bottom, right, top, boundless=False):
108 """Get the window corresponding to the bounding coordinates.
109
110 Parameters
111 ----------
112 left : float
113 Left (west) bounding coordinate
114 bottom : float
115 Bottom (south) bounding coordinate
116 right : float
117 Right (east) bounding coordinate
118 top : float
119 Top (north) bounding coordinate
120 boundless: boolean, optional
121 If boundless is False, window is limited
122 to extent of this dataset.
123
124 Returns
125 -------
126 window: tuple
127 ((row_start, row_stop), (col_start, col_stop))
128 corresponding to the bounding coordinates
129
130 """
131
132 transform = guard_transform(self.transform)
133 return windows.from_bounds(
134 left, bottom, right, top, transform=transform,
135 height=self.height, width=self.width, boundless=boundless)
136
137 def window_transform(self, window):
138 """Get the affine transform for a dataset window.
139
140 Parameters
141 ----------
142 window: tuple
143 Dataset window tuple
144
145 Returns
146 -------
147 transform: Affine
148 The affine transform matrix for the given window
149 """
150
151 transform = guard_transform(self.transform)
152 return windows.transform(window, transform)
153
154 def window_bounds(self, window):
155 """Get the bounds of a window
156
157 Parameters
158 ----------
159 window: tuple
160 Dataset window tuple
161
162 Returns
163 -------
164 bounds : tuple
165 x_min, y_min, x_max, y_max for the given window
166 """
167
168 transform = guard_transform(self.transform)
169 return windows.bounds(window, transform)
170
171
172 class DatasetReader(DatasetReaderBase, WindowMethodsMixin,
173 TransformMethodsMixin):
174 """An unbuffered data and metadata reader"""
175
176 def __repr__(self):
177 return "<{} DatasetReader name='{}' mode='{}'>".format(
178 self.closed and 'closed' or 'open', self.name, self.mode)
179
180
181 class DatasetWriter(DatasetWriterBase, WindowMethodsMixin,
182 TransformMethodsMixin):
183 """An unbuffered data and metadata writer. Its methods write data
184 directly to disk.
185 """
186
187 def __repr__(self):
188 return "<{} DatasetWriter name='{}' mode='{}'>".format(
189 self.closed and 'closed' or 'open', self.name, self.mode)
190
191
192 class BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,
193 TransformMethodsMixin):
194 """Maintains data and metadata in a buffer, writing to disk or
195 network only when `close()` is called.
196
197 This allows incremental updates to datasets using formats that don't
198 otherwise support updates, such as JPEG.
199 """
200
201 def __repr__(self):
202 return "<{} BufferedDatasetWriter name='{}' mode='{}'>".format(
203 self.closed and 'closed' or 'open', self.name, self.mode)
204
205
206 class MemoryFile(MemoryFileBase):
207 """A BytesIO-like object, backed by an in-memory file.
208
209 This allows formatted files to be read and written without I/O.
210
211 A MemoryFile created with initial bytes becomes immutable. A
212 MemoryFile created without initial bytes may be written to using
213 either file-like or dataset interfaces.
214
215 Examples
216 --------
217
218 A GeoTIFF can be loaded in memory and accessed using the GeoTIFF
219 format driver
220
221 >>> with open('tests/data/RGB.byte.tif', 'rb') as f, \
222 ... MemoryFile(f.read()) as memfile:
223 ... with memfile.open() as src:
224 ... pprint.pprint(src.profile)
225 ...
226 {'count': 3,
227 'crs': CRS({'init': 'epsg:32618'}),
228 'driver': 'GTiff',
229 'dtype': 'uint8',
230 'height': 718,
231 'interleave': 'pixel',
232 'nodata': 0.0,
233 'tiled': False,
234 'transform': Affine(300.0379266750948, 0.0, 101985.0,
235 0.0, -300.041782729805, 2826915.0),
236 'width': 791}
237
238 """
239
240 def open(self, driver=None, width=None, height=None,
241 count=None, crs=None, transform=None, dtype=None, nodata=None,
242 **kwargs):
243 """Open the file and return a Rasterio dataset object.
244
245 If data has already been written, the file is opened in 'r+'
246 mode. Otherwise, the file is opened in 'w' mode.
247 """
248 with Env():
249 if self.closed:
250 raise IOError("I/O operation on closed file.")
251 if self.exists():
252 s = get_writer_for_path(self.name)(self.name, 'r+')
253 else:
254 s = get_writer_for_driver(
255 driver)(self.name, 'w', driver=driver, width=width,
256 height=height, count=count, crs=crs,
257 transform=transform, dtype=dtype, nodata=nodata,
258 **kwargs)
259 s.start()
260 return s
261
262 def __enter__(self):
263 return self
264
265 def __exit__(self, *args, **kwargs):
266 self.close()
267
268
269 def get_writer_for_driver(driver):
270 """Return the writer class appropriate for the specified driver."""
271 cls = None
272 if driver_can_create(driver):
273 cls = DatasetWriter
274 elif driver_can_create_copy(driver): # pragma: no branch
275 cls = BufferedDatasetWriter
276 return cls
277
278
279 def get_writer_for_path(path):
280 """Return the writer class appropriate for the existing dataset."""
281 driver = get_dataset_driver(path)
282 return get_writer_for_driver(driver)
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/io.py b/rasterio/io.py
--- a/rasterio/io.py
+++ b/rasterio/io.py
@@ -9,7 +9,7 @@
import warnings
from rasterio._base import (
- get_dataset_driver, driver_can_create, driver_can_create_copy)
+ DatasetBase, get_dataset_driver, driver_can_create, driver_can_create_copy)
from rasterio._io import (
DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,
MemoryFileBase)
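
A minimal usage sketch (illustrative only, not part of the dataset row): once the golden diff above is applied, `DatasetBase` becomes importable from `rasterio.io`, so a single annotation can replace the three-way `Union` shown in the issue. The `describe` helper below is hypothetical and only reads attributes the base class already exposes (`name`, `width`, `height`, `count`).

```python
# Sketch only: assumes the golden diff above has been applied, so DatasetBase
# is re-exported through rasterio.io alongside the concrete dataset classes.
from rasterio.io import DatasetBase


def describe(ds: DatasetBase) -> str:
    """Hypothetical helper that accepts readers and writers alike."""
    return "{}: {}x{} px, {} band(s)".format(ds.name, ds.width, ds.height, ds.count)
```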
| {"golden_diff": "diff --git a/rasterio/io.py b/rasterio/io.py\n--- a/rasterio/io.py\n+++ b/rasterio/io.py\n@@ -9,7 +9,7 @@\n import warnings\n \n from rasterio._base import (\n- get_dataset_driver, driver_can_create, driver_can_create_copy)\n+ DatasetBase, get_dataset_driver, driver_can_create, driver_can_create_copy)\n from rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\n", "issue": "Allow type annotation support for rasterio data types\nRecently I've been adding PEP 484 type annotations to my projects and rasterio has proved quite ugly to use as is, the main annoyance being that `rasterio.open` may return objects belonging to three classes (`DatasetReader`, `DatasetWriter` and `BufferedDatasetWriter`) with a non-public base `DatasetBase`, so right now the only option to annotate a generic dataset is:\r\n```\r\nfrom typing import Union\r\nfrom rasterio import io\r\n\r\ndef myfunc(\r\n ds: Union[io.DatasetReader, io.DatasetWriter, io.BufferedDatasetWriter]\r\n) -> None:\r\n pass\r\n```\r\n\r\nSomething like exposing the DatasetBase class inside rasterio would make the above much more readable:\r\n```\r\nimport rasterio\r\n\r\ndef myfunc(ds: rasterio.DatasetBase) -> None:\r\n pass\r\n```\r\n\r\nWould you consider restructuring a bit the dataset classes so that they are easier to use in type annotations? I guess I'm the first one having such a need :)\n", "before_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\nimport logging\nimport math\nimport uuid\nimport warnings\n\nfrom rasterio._base import (\n get_dataset_driver, driver_can_create, driver_can_create_copy)\nfrom rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\nfrom rasterio import enums, windows\nfrom rasterio.env import Env\nfrom rasterio.transform import guard_transform, xy, rowcol\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TransformMethodsMixin(object):\n \"\"\"Mixin providing methods for calculations related\n to transforming between rows and columns of the raster\n array and the coordinates.\n\n These methods are wrappers for the functionality in\n `rasterio.transform` module.\n\n A subclass with this mixin MUST provide a `transform`\n property.\n \"\"\"\n\n def xy(self, row, col, offset=\"center\"):\n \"\"\"Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.\n The pixel's center is returned by default, but a corner can be returned\n by setting `offset` to one of `ul, ur, ll, lr`.\n\n Parameters\n ----------\n row : int\n Pixel row.\n col : int\n Pixel column.\n offset : str, optional\n Determines if the returned coordinates are for the center of the\n pixel or for a corner.\n\n Returns\n -------\n tuple\n ``(x, y)``\n \"\"\"\n return xy(self.transform, row, col, offset=offset)\n\n def ul(self, row, col):\n \"\"\"Returns the coordinates (x, y) of the upper left corner of a\n pixel at `row` and `col` in the units of the dataset's\n coordinate reference system.\n\n Deprecated; Use `xy(row, col, offset='ul')` instead.\n \"\"\"\n warnings.warn(\"ul method is deprecated. 
Use xy(row, col, offset='ul')\",\n DeprecationWarning)\n return xy(self.transform, row, col, offset='ul')\n\n def index(self, x, y, op=math.floor, precision=6):\n \"\"\"\n Returns the (row, col) index of the pixel containing (x, y) given a\n coordinate reference system.\n\n Use an epsilon, magnitude determined by the precision parameter\n and sign determined by the op function:\n positive for floor, negative for ceil.\n\n Parameters\n ----------\n x : float\n x value in coordinate reference system\n y : float\n y value in coordinate reference system\n op : function, optional (default: math.floor)\n Function to convert fractional pixels to whole numbers (floor,\n ceiling, round)\n precision : int, optional (default: 6)\n Decimal places of precision in indexing, as in `round()`.\n\n Returns\n -------\n tuple\n (row index, col index)\n \"\"\"\n return rowcol(self.transform, x, y, op=op, precision=precision)\n\n\nclass WindowMethodsMixin(object):\n \"\"\"Mixin providing methods for window-related calculations.\n These methods are wrappers for the functionality in\n `rasterio.windows` module.\n\n A subclass with this mixin MUST provide the following\n properties: `transform`, `height` and `width`\n \"\"\"\n\n def window(self, left, bottom, right, top, boundless=False):\n \"\"\"Get the window corresponding to the bounding coordinates.\n\n Parameters\n ----------\n left : float\n Left (west) bounding coordinate\n bottom : float\n Bottom (south) bounding coordinate\n right : float\n Right (east) bounding coordinate\n top : float\n Top (north) bounding coordinate\n boundless: boolean, optional\n If boundless is False, window is limited\n to extent of this dataset.\n\n Returns\n -------\n window: tuple\n ((row_start, row_stop), (col_start, col_stop))\n corresponding to the bounding coordinates\n\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.from_bounds(\n left, bottom, right, top, transform=transform,\n height=self.height, width=self.width, boundless=boundless)\n\n def window_transform(self, window):\n \"\"\"Get the affine transform for a dataset window.\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n transform: Affine\n The affine transform matrix for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.transform(window, transform)\n\n def window_bounds(self, window):\n \"\"\"Get the bounds of a window\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n bounds : tuple\n x_min, y_min, x_max, y_max for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.bounds(window, transform)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. 
Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass MemoryFile(MemoryFileBase):\n \"\"\"A BytesIO-like object, backed by an in-memory file.\n\n This allows formatted files to be read and written without I/O.\n\n A MemoryFile created with initial bytes becomes immutable. A\n MemoryFile created without initial bytes may be written to using\n either file-like or dataset interfaces.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, \\\n ... MemoryFile(f.read()) as memfile:\n ... with memfile.open() as src:\n ... pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n\n def open(self, driver=None, width=None, height=None,\n count=None, crs=None, transform=None, dtype=None, nodata=None,\n **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n If data has already been written, the file is opened in 'r+'\n mode. 
Otherwise, the file is opened in 'w' mode.\n \"\"\"\n with Env():\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n if self.exists():\n s = get_writer_for_path(self.name)(self.name, 'r+')\n else:\n s = get_writer_for_driver(\n driver)(self.name, 'w', driver=driver, width=width,\n height=height, count=count, crs=crs,\n transform=transform, dtype=dtype, nodata=nodata,\n **kwargs)\n s.start()\n return s\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self.close()\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}], "after_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\nimport logging\nimport math\nimport uuid\nimport warnings\n\nfrom rasterio._base import (\n DatasetBase, get_dataset_driver, driver_can_create, driver_can_create_copy)\nfrom rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\nfrom rasterio import enums, windows\nfrom rasterio.env import Env\nfrom rasterio.transform import guard_transform, xy, rowcol\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TransformMethodsMixin(object):\n \"\"\"Mixin providing methods for calculations related\n to transforming between rows and columns of the raster\n array and the coordinates.\n\n These methods are wrappers for the functionality in\n `rasterio.transform` module.\n\n A subclass with this mixin MUST provide a `transform`\n property.\n \"\"\"\n\n def xy(self, row, col, offset=\"center\"):\n \"\"\"Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.\n The pixel's center is returned by default, but a corner can be returned\n by setting `offset` to one of `ul, ur, ll, lr`.\n\n Parameters\n ----------\n row : int\n Pixel row.\n col : int\n Pixel column.\n offset : str, optional\n Determines if the returned coordinates are for the center of the\n pixel or for a corner.\n\n Returns\n -------\n tuple\n ``(x, y)``\n \"\"\"\n return xy(self.transform, row, col, offset=offset)\n\n def ul(self, row, col):\n \"\"\"Returns the coordinates (x, y) of the upper left corner of a\n pixel at `row` and `col` in the units of the dataset's\n coordinate reference system.\n\n Deprecated; Use `xy(row, col, offset='ul')` instead.\n \"\"\"\n warnings.warn(\"ul method is deprecated. 
Use xy(row, col, offset='ul')\",\n DeprecationWarning)\n return xy(self.transform, row, col, offset='ul')\n\n def index(self, x, y, op=math.floor, precision=6):\n \"\"\"\n Returns the (row, col) index of the pixel containing (x, y) given a\n coordinate reference system.\n\n Use an epsilon, magnitude determined by the precision parameter\n and sign determined by the op function:\n positive for floor, negative for ceil.\n\n Parameters\n ----------\n x : float\n x value in coordinate reference system\n y : float\n y value in coordinate reference system\n op : function, optional (default: math.floor)\n Function to convert fractional pixels to whole numbers (floor,\n ceiling, round)\n precision : int, optional (default: 6)\n Decimal places of precision in indexing, as in `round()`.\n\n Returns\n -------\n tuple\n (row index, col index)\n \"\"\"\n return rowcol(self.transform, x, y, op=op, precision=precision)\n\n\nclass WindowMethodsMixin(object):\n \"\"\"Mixin providing methods for window-related calculations.\n These methods are wrappers for the functionality in\n `rasterio.windows` module.\n\n A subclass with this mixin MUST provide the following\n properties: `transform`, `height` and `width`\n \"\"\"\n\n def window(self, left, bottom, right, top, boundless=False):\n \"\"\"Get the window corresponding to the bounding coordinates.\n\n Parameters\n ----------\n left : float\n Left (west) bounding coordinate\n bottom : float\n Bottom (south) bounding coordinate\n right : float\n Right (east) bounding coordinate\n top : float\n Top (north) bounding coordinate\n boundless: boolean, optional\n If boundless is False, window is limited\n to extent of this dataset.\n\n Returns\n -------\n window: tuple\n ((row_start, row_stop), (col_start, col_stop))\n corresponding to the bounding coordinates\n\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.from_bounds(\n left, bottom, right, top, transform=transform,\n height=self.height, width=self.width, boundless=boundless)\n\n def window_transform(self, window):\n \"\"\"Get the affine transform for a dataset window.\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n transform: Affine\n The affine transform matrix for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.transform(window, transform)\n\n def window_bounds(self, window):\n \"\"\"Get the bounds of a window\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n bounds : tuple\n x_min, y_min, x_max, y_max for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.bounds(window, transform)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. 
Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass MemoryFile(MemoryFileBase):\n \"\"\"A BytesIO-like object, backed by an in-memory file.\n\n This allows formatted files to be read and written without I/O.\n\n A MemoryFile created with initial bytes becomes immutable. A\n MemoryFile created without initial bytes may be written to using\n either file-like or dataset interfaces.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, \\\n ... MemoryFile(f.read()) as memfile:\n ... with memfile.open() as src:\n ... pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n\n def open(self, driver=None, width=None, height=None,\n count=None, crs=None, transform=None, dtype=None, nodata=None,\n **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n If data has already been written, the file is opened in 'r+'\n mode. Otherwise, the file is opened in 'w' mode.\n \"\"\"\n with Env():\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n if self.exists():\n s = get_writer_for_path(self.name)(self.name, 'r+')\n else:\n s = get_writer_for_driver(\n driver)(self.name, 'w', driver=driver, width=width,\n height=height, count=count, crs=crs,\n transform=transform, dtype=dtype, nodata=nodata,\n **kwargs)\n s.start()\n return s\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self.close()\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}]} | 3,203 | 111 |
gh_patches_debug_57548 | rasdani/github-patches | git_diff | nextcloud__appstore-89 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
What categories do we need?
Currently allowed categories include:
- multimedia
- pim
- tools
- games
Anything else?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nextcloudappstore/core/models.py`
Content:
```
1 from django.conf import settings # type: ignore
2 from django.contrib.auth.models import User # type: ignore
3 from django.db.models import ManyToManyField, ForeignKey, \
4 URLField, IntegerField, CharField, CASCADE, TextField, \
5 DateTimeField, Model, BooleanField # type: ignore
6 from django.utils.translation import ugettext_lazy as _ # type: ignore
7 from parler.models import TranslatedFields, TranslatableModel # type: ignore
8
9
10 class App(TranslatableModel):
11 id = CharField(max_length=128, unique=True, primary_key=True,
12 verbose_name=_('Id'),
13 help_text=_('app id, identical to folder name'))
14 categories = ManyToManyField('Category', verbose_name=_('Category'))
15 translations = TranslatedFields(
16 name=CharField(max_length=128, verbose_name=_('Name'),
17 help_text=_('Rendered app name for users')),
18 description=TextField(verbose_name=_('Description'), help_text=_(
19 'Will be rendered as Markdown'))
20 )
21 # resources
22 user_docs = URLField(max_length=256, blank=True,
23 verbose_name=_('User documentation url'))
24 admin_docs = URLField(max_length=256, blank=True,
25 verbose_name=_('Admin documentation url'))
26 developer_docs = URLField(max_length=256, blank=True,
27 verbose_name=_('Developer documentation url'))
28 issue_tracker = URLField(max_length=256, blank=True,
29 verbose_name=_('Issue tracker url'))
30 website = URLField(max_length=256, blank=True, verbose_name=_('Homepage'))
31 created = DateTimeField(auto_now_add=True, editable=False,
32 verbose_name=_('Created at'))
33 last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,
34 verbose_name=_('Updated at'))
35 owner = ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('App owner'),
36 on_delete=CASCADE, related_name='owned_apps')
37 co_maintainers = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,
38 verbose_name=_('Co-Maintainers'),
39 related_name='co_maintained_apps')
40 recommendations = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,
41 verbose_name=_('Recommendations'),
42 related_name='recommended_apps')
43 featured = BooleanField(verbose_name=_('Featured'), default=False)
44
45 class Meta:
46 verbose_name = _('App')
47 verbose_name_plural = _('Apps')
48
49 def __str__(self) -> str:
50 return self.name
51
52 def can_update(self, user: User) -> bool:
53 return self.owner == user or user in self.co_maintainers.all()
54
55 def can_delete(self, user: User) -> bool:
56 return self.owner == user
57
58
59 class AppRelease(Model):
60 version = CharField(max_length=128, verbose_name=_('Version'),
61 help_text=_('Version follows Semantic Versioning'))
62 app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),
63 related_name='releases')
64 # dependencies
65 php_extensions = ManyToManyField('PhpExtension', blank=True,
66 through='PhpExtensionDependency',
67 verbose_name=_(
68 'PHP extension dependency'))
69 databases = ManyToManyField('Database', blank=True,
70 through='DatabaseDependency',
71 verbose_name=_('Database dependency'))
72 licenses = ManyToManyField('License', verbose_name=_('License'))
73 shell_commands = ManyToManyField('ShellCommand', blank=True,
74 verbose_name=_(
75 'Shell command dependency'))
76 php_version_spec = CharField(max_length=128,
77 verbose_name=_('PHP version requirement'))
78 platform_version_spec = CharField(max_length=128, verbose_name=_(
79 'Platform version requirement'))
80 min_int_size = IntegerField(blank=True, default=32,
81 verbose_name=_('Minimum Integer Bits'),
82 help_text=_('e.g. 32 for 32bit Integers'))
83 checksum = CharField(max_length=64, verbose_name=_('SHA256 checksum'))
84 download = URLField(max_length=256, blank=True,
85 verbose_name=_('Archive download Url'))
86 created = DateTimeField(auto_now_add=True, editable=False,
87 verbose_name=_('Created at'))
88 last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,
89 verbose_name=_('Updated at'))
90
91 class Meta:
92 verbose_name = _('App Release')
93 verbose_name_plural = _('App Releases')
94 unique_together = (('app', 'version'),)
95 ordering = ['-version']
96
97 def can_update(self, user: User) -> bool:
98 return self.app.owner == user or user in self.app.co_maintainers.all()
99
100 def can_delete(self, user: User) -> bool:
101 return self.can_update(user)
102
103 def __str__(self) -> str:
104 return '%s %s' % (self.app, self.version)
105
106
107 class Screenshot(Model):
108 url = URLField(max_length=256, verbose_name=_('Image url'))
109 app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),
110 related_name='screenshots')
111 ordering = IntegerField(verbose_name=_('Ordering'))
112
113 class Meta:
114 verbose_name = _('Screenshot')
115 verbose_name_plural = _('Screenshots')
116 ordering = ['ordering']
117
118 def __str__(self) -> str:
119 return self.url
120
121
122 class ShellCommand(Model):
123 name = CharField(max_length=128, unique=True, primary_key=True,
124 verbose_name=_('Shell Command'),
125 help_text=_(
126 'Name of a required shell command, e.g. grep'))
127
128 class Meta:
129 verbose_name = _('Shell Command')
130 verbose_name_plural = _('Shell Commands')
131
132 def __str__(self) -> str:
133 return self.name
134
135
136 class Category(TranslatableModel):
137 id = CharField(max_length=128, unique=True, primary_key=True,
138 verbose_name=_('Id'),
139 help_text=_(
140 'Category id which is used to identify a '
141 'category. Used to identify categories when '
142 'uploading an app'))
143 created = DateTimeField(auto_now_add=True, editable=False,
144 verbose_name=_('Created at'))
145 last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,
146 verbose_name=_('Updated at'))
147 translations = TranslatedFields(
148 name=CharField(max_length=128, help_text=_(
149 'Category name which will be presented to the user'),
150 verbose_name=_('Name')),
151 description=TextField(verbose_name=_('Description'),
152 help_text=_('Will be rendered as Markdown'))
153 )
154
155 class Meta:
156 verbose_name = _('Category')
157 verbose_name_plural = _('Categories')
158
159 def __str__(self) -> str:
160 return self.name
161
162
163 class License(Model):
164 id = CharField(max_length=128, unique=True, primary_key=True,
165 verbose_name=_('Id'),
166 help_text=_(
167 'Key which is used to identify a license'))
168 name = CharField(max_length=128, verbose_name=_('Name'),
169 help_text=_(
170 'License name which will be presented to '
171 'the user'))
172
173 class Meta:
174 verbose_name = _('License')
175 verbose_name_plural = _('Licenses')
176
177 def __str__(self) -> str:
178 return self.name
179
180
181 class Database(Model):
182 id = CharField(max_length=128, unique=True, primary_key=True,
183 verbose_name=_('Id'),
184 help_text=_('Key which is used to identify a database'))
185 name = CharField(max_length=128, verbose_name=_('Name'),
186 help_text=_(
187 'Database name which will be presented to the user'))
188
189 class Meta:
190 verbose_name = _('Database')
191 verbose_name_plural = _('Databases')
192
193 def __str__(self) -> str:
194 return self.name
195
196
197 class DatabaseDependency(Model):
198 app_release = ForeignKey('AppRelease', on_delete=CASCADE,
199 verbose_name=_('App release'),
200 related_name='databasedependencies')
201 database = ForeignKey('Database', related_name='releasedependencies',
202 on_delete=CASCADE, verbose_name=_('Database'))
203 version_spec = CharField(max_length=128,
204 verbose_name=_('Database version requirement'))
205
206 class Meta:
207 verbose_name = _('Database Dependency')
208 verbose_name_plural = _('Database Dependencies')
209 unique_together = (('app_release', 'database', 'version_spec'),)
210
211 def __str__(self) -> str:
212 return '%s: %s %s' % (self.app_release, self.database,
213 self.version_spec)
214
215
216 class PhpExtension(Model):
217 id = CharField(max_length=128, unique=True, help_text=_('e.g. libxml'),
218 primary_key=True, verbose_name=_('PHP extension'))
219
220 class Meta:
221 verbose_name = _('PHP Extension')
222 verbose_name_plural = _('PHP Extensions')
223
224 def __str__(self) -> str:
225 return self.id
226
227
228 class PhpExtensionDependency(Model):
229 app_release = ForeignKey('AppRelease', on_delete=CASCADE,
230 verbose_name=_('App Release'),
231 related_name='phpextensiondependencies')
232 php_extension = ForeignKey('PhpExtension', on_delete=CASCADE,
233 verbose_name=_('PHP Extension'),
234 related_name='releasedependencies')
235 version_spec = CharField(max_length=128,
236 verbose_name=_('Extension version requirement'))
237
238 class Meta:
239 verbose_name = _('PHP Extension Dependency')
240 verbose_name_plural = _('PHP Extension Dependencies')
241 unique_together = (('app_release', 'php_extension', 'version_spec'),)
242
243 def __str__(self) -> str:
244 return '%s: %s %s' % (self.app_release.app, self.php_extension,
245 self.version_spec)
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nextcloudappstore/core/models.py b/nextcloudappstore/core/models.py
--- a/nextcloudappstore/core/models.py
+++ b/nextcloudappstore/core/models.py
@@ -155,6 +155,7 @@
class Meta:
verbose_name = _('Category')
verbose_name_plural = _('Categories')
+ ordering = ['id']
def __str__(self) -> str:
return self.name
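
A small illustration of the fix's effect (hypothetical sketch, not code from the app store): with `Meta.ordering = ['id']` on `Category`, every unqualified queryset comes back sorted alphabetically by id, so category listings are stable without an explicit `order_by`.

```python
# Hypothetical sketch: relies only on the default ordering added by the patch.
from nextcloudappstore.core.models import Category


def category_choices() -> list:
    # Equivalent to Category.objects.order_by('id') once Meta.ordering is set.
    return [(c.id, c.name) for c in Category.objects.all()]
```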
| {"golden_diff": "diff --git a/nextcloudappstore/core/models.py b/nextcloudappstore/core/models.py\n--- a/nextcloudappstore/core/models.py\n+++ b/nextcloudappstore/core/models.py\n@@ -155,6 +155,7 @@\n class Meta:\n verbose_name = _('Category')\n verbose_name_plural = _('Categories')\n+ ordering = ['id']\n \n def __str__(self) -> str:\n return self.name\n", "issue": "What categories do we need?\nCurrently allowed categories include:\n- multimedia\n- pim\n- tools\n- games\n\nAnything else?\n\n", "before_files": [{"content": "from django.conf import settings # type: ignore\nfrom django.contrib.auth.models import User # type: ignore\nfrom django.db.models import ManyToManyField, ForeignKey, \\\n URLField, IntegerField, CharField, CASCADE, TextField, \\\n DateTimeField, Model, BooleanField # type: ignore\nfrom django.utils.translation import ugettext_lazy as _ # type: ignore\nfrom parler.models import TranslatedFields, TranslatableModel # type: ignore\n\n\nclass App(TranslatableModel):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_('app id, identical to folder name'))\n categories = ManyToManyField('Category', verbose_name=_('Category'))\n translations = TranslatedFields(\n name=CharField(max_length=128, verbose_name=_('Name'),\n help_text=_('Rendered app name for users')),\n description=TextField(verbose_name=_('Description'), help_text=_(\n 'Will be rendered as Markdown'))\n )\n # resources\n user_docs = URLField(max_length=256, blank=True,\n verbose_name=_('User documentation url'))\n admin_docs = URLField(max_length=256, blank=True,\n verbose_name=_('Admin documentation url'))\n developer_docs = URLField(max_length=256, blank=True,\n verbose_name=_('Developer documentation url'))\n issue_tracker = URLField(max_length=256, blank=True,\n verbose_name=_('Issue tracker url'))\n website = URLField(max_length=256, blank=True, verbose_name=_('Homepage'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n owner = ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('App owner'),\n on_delete=CASCADE, related_name='owned_apps')\n co_maintainers = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,\n verbose_name=_('Co-Maintainers'),\n related_name='co_maintained_apps')\n recommendations = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,\n verbose_name=_('Recommendations'),\n related_name='recommended_apps')\n featured = BooleanField(verbose_name=_('Featured'), default=False)\n\n class Meta:\n verbose_name = _('App')\n verbose_name_plural = _('Apps')\n\n def __str__(self) -> str:\n return self.name\n\n def can_update(self, user: User) -> bool:\n return self.owner == user or user in self.co_maintainers.all()\n\n def can_delete(self, user: User) -> bool:\n return self.owner == user\n\n\nclass AppRelease(Model):\n version = CharField(max_length=128, verbose_name=_('Version'),\n help_text=_('Version follows Semantic Versioning'))\n app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),\n related_name='releases')\n # dependencies\n php_extensions = ManyToManyField('PhpExtension', blank=True,\n through='PhpExtensionDependency',\n verbose_name=_(\n 'PHP extension dependency'))\n databases = ManyToManyField('Database', blank=True,\n through='DatabaseDependency',\n verbose_name=_('Database dependency'))\n licenses = ManyToManyField('License', verbose_name=_('License'))\n shell_commands 
= ManyToManyField('ShellCommand', blank=True,\n verbose_name=_(\n 'Shell command dependency'))\n php_version_spec = CharField(max_length=128,\n verbose_name=_('PHP version requirement'))\n platform_version_spec = CharField(max_length=128, verbose_name=_(\n 'Platform version requirement'))\n min_int_size = IntegerField(blank=True, default=32,\n verbose_name=_('Minimum Integer Bits'),\n help_text=_('e.g. 32 for 32bit Integers'))\n checksum = CharField(max_length=64, verbose_name=_('SHA256 checksum'))\n download = URLField(max_length=256, blank=True,\n verbose_name=_('Archive download Url'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n\n class Meta:\n verbose_name = _('App Release')\n verbose_name_plural = _('App Releases')\n unique_together = (('app', 'version'),)\n ordering = ['-version']\n\n def can_update(self, user: User) -> bool:\n return self.app.owner == user or user in self.app.co_maintainers.all()\n\n def can_delete(self, user: User) -> bool:\n return self.can_update(user)\n\n def __str__(self) -> str:\n return '%s %s' % (self.app, self.version)\n\n\nclass Screenshot(Model):\n url = URLField(max_length=256, verbose_name=_('Image url'))\n app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),\n related_name='screenshots')\n ordering = IntegerField(verbose_name=_('Ordering'))\n\n class Meta:\n verbose_name = _('Screenshot')\n verbose_name_plural = _('Screenshots')\n ordering = ['ordering']\n\n def __str__(self) -> str:\n return self.url\n\n\nclass ShellCommand(Model):\n name = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Shell Command'),\n help_text=_(\n 'Name of a required shell command, e.g. grep'))\n\n class Meta:\n verbose_name = _('Shell Command')\n verbose_name_plural = _('Shell Commands')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass Category(TranslatableModel):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_(\n 'Category id which is used to identify a '\n 'category. 
Used to identify categories when '\n 'uploading an app'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n translations = TranslatedFields(\n name=CharField(max_length=128, help_text=_(\n 'Category name which will be presented to the user'),\n verbose_name=_('Name')),\n description=TextField(verbose_name=_('Description'),\n help_text=_('Will be rendered as Markdown'))\n )\n\n class Meta:\n verbose_name = _('Category')\n verbose_name_plural = _('Categories')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass License(Model):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_(\n 'Key which is used to identify a license'))\n name = CharField(max_length=128, verbose_name=_('Name'),\n help_text=_(\n 'License name which will be presented to '\n 'the user'))\n\n class Meta:\n verbose_name = _('License')\n verbose_name_plural = _('Licenses')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass Database(Model):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_('Key which is used to identify a database'))\n name = CharField(max_length=128, verbose_name=_('Name'),\n help_text=_(\n 'Database name which will be presented to the user'))\n\n class Meta:\n verbose_name = _('Database')\n verbose_name_plural = _('Databases')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass DatabaseDependency(Model):\n app_release = ForeignKey('AppRelease', on_delete=CASCADE,\n verbose_name=_('App release'),\n related_name='databasedependencies')\n database = ForeignKey('Database', related_name='releasedependencies',\n on_delete=CASCADE, verbose_name=_('Database'))\n version_spec = CharField(max_length=128,\n verbose_name=_('Database version requirement'))\n\n class Meta:\n verbose_name = _('Database Dependency')\n verbose_name_plural = _('Database Dependencies')\n unique_together = (('app_release', 'database', 'version_spec'),)\n\n def __str__(self) -> str:\n return '%s: %s %s' % (self.app_release, self.database,\n self.version_spec)\n\n\nclass PhpExtension(Model):\n id = CharField(max_length=128, unique=True, help_text=_('e.g. 
libxml'),\n primary_key=True, verbose_name=_('PHP extension'))\n\n class Meta:\n verbose_name = _('PHP Extension')\n verbose_name_plural = _('PHP Extensions')\n\n def __str__(self) -> str:\n return self.id\n\n\nclass PhpExtensionDependency(Model):\n app_release = ForeignKey('AppRelease', on_delete=CASCADE,\n verbose_name=_('App Release'),\n related_name='phpextensiondependencies')\n php_extension = ForeignKey('PhpExtension', on_delete=CASCADE,\n verbose_name=_('PHP Extension'),\n related_name='releasedependencies')\n version_spec = CharField(max_length=128,\n verbose_name=_('Extension version requirement'))\n\n class Meta:\n verbose_name = _('PHP Extension Dependency')\n verbose_name_plural = _('PHP Extension Dependencies')\n unique_together = (('app_release', 'php_extension', 'version_spec'),)\n\n def __str__(self) -> str:\n return '%s: %s %s' % (self.app_release.app, self.php_extension,\n self.version_spec)\n", "path": "nextcloudappstore/core/models.py"}], "after_files": [{"content": "from django.conf import settings # type: ignore\nfrom django.contrib.auth.models import User # type: ignore\nfrom django.db.models import ManyToManyField, ForeignKey, \\\n URLField, IntegerField, CharField, CASCADE, TextField, \\\n DateTimeField, Model, BooleanField # type: ignore\nfrom django.utils.translation import ugettext_lazy as _ # type: ignore\nfrom parler.models import TranslatedFields, TranslatableModel # type: ignore\n\n\nclass App(TranslatableModel):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_('app id, identical to folder name'))\n categories = ManyToManyField('Category', verbose_name=_('Category'))\n translations = TranslatedFields(\n name=CharField(max_length=128, verbose_name=_('Name'),\n help_text=_('Rendered app name for users')),\n description=TextField(verbose_name=_('Description'), help_text=_(\n 'Will be rendered as Markdown'))\n )\n # resources\n user_docs = URLField(max_length=256, blank=True,\n verbose_name=_('User documentation url'))\n admin_docs = URLField(max_length=256, blank=True,\n verbose_name=_('Admin documentation url'))\n developer_docs = URLField(max_length=256, blank=True,\n verbose_name=_('Developer documentation url'))\n issue_tracker = URLField(max_length=256, blank=True,\n verbose_name=_('Issue tracker url'))\n website = URLField(max_length=256, blank=True, verbose_name=_('Homepage'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n owner = ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('App owner'),\n on_delete=CASCADE, related_name='owned_apps')\n co_maintainers = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,\n verbose_name=_('Co-Maintainers'),\n related_name='co_maintained_apps')\n recommendations = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,\n verbose_name=_('Recommendations'),\n related_name='recommended_apps')\n featured = BooleanField(verbose_name=_('Featured'), default=False)\n\n class Meta:\n verbose_name = _('App')\n verbose_name_plural = _('Apps')\n\n def __str__(self) -> str:\n return self.name\n\n def can_update(self, user: User) -> bool:\n return self.owner == user or user in self.co_maintainers.all()\n\n def can_delete(self, user: User) -> bool:\n return self.owner == user\n\n\nclass AppRelease(Model):\n version = CharField(max_length=128, verbose_name=_('Version'),\n help_text=_('Version follows Semantic 
Versioning'))\n app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),\n related_name='releases')\n # dependencies\n php_extensions = ManyToManyField('PhpExtension', blank=True,\n through='PhpExtensionDependency',\n verbose_name=_(\n 'PHP extension dependency'))\n databases = ManyToManyField('Database', blank=True,\n through='DatabaseDependency',\n verbose_name=_('Database dependency'))\n licenses = ManyToManyField('License', verbose_name=_('License'))\n shell_commands = ManyToManyField('ShellCommand', blank=True,\n verbose_name=_(\n 'Shell command dependency'))\n php_version_spec = CharField(max_length=128,\n verbose_name=_('PHP version requirement'))\n platform_version_spec = CharField(max_length=128, verbose_name=_(\n 'Platform version requirement'))\n min_int_size = IntegerField(blank=True, default=32,\n verbose_name=_('Minimum Integer Bits'),\n help_text=_('e.g. 32 for 32bit Integers'))\n checksum = CharField(max_length=64, verbose_name=_('SHA256 checksum'))\n download = URLField(max_length=256, blank=True,\n verbose_name=_('Archive download Url'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n\n class Meta:\n verbose_name = _('App Release')\n verbose_name_plural = _('App Releases')\n unique_together = (('app', 'version'),)\n ordering = ['-version']\n\n def can_update(self, user: User) -> bool:\n return self.app.owner == user or user in self.app.co_maintainers.all()\n\n def can_delete(self, user: User) -> bool:\n return self.can_update(user)\n\n def __str__(self) -> str:\n return '%s %s' % (self.app, self.version)\n\n\nclass Screenshot(Model):\n url = URLField(max_length=256, verbose_name=_('Image url'))\n app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),\n related_name='screenshots')\n ordering = IntegerField(verbose_name=_('Ordering'))\n\n class Meta:\n verbose_name = _('Screenshot')\n verbose_name_plural = _('Screenshots')\n ordering = ['ordering']\n\n def __str__(self) -> str:\n return self.url\n\n\nclass ShellCommand(Model):\n name = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Shell Command'),\n help_text=_(\n 'Name of a required shell command, e.g. grep'))\n\n class Meta:\n verbose_name = _('Shell Command')\n verbose_name_plural = _('Shell Commands')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass Category(TranslatableModel):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_(\n 'Category id which is used to identify a '\n 'category. 
Used to identify categories when '\n 'uploading an app'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n translations = TranslatedFields(\n name=CharField(max_length=128, help_text=_(\n 'Category name which will be presented to the user'),\n verbose_name=_('Name')),\n description=TextField(verbose_name=_('Description'),\n help_text=_('Will be rendered as Markdown'))\n )\n\n class Meta:\n verbose_name = _('Category')\n verbose_name_plural = _('Categories')\n ordering = ['id']\n\n def __str__(self) -> str:\n return self.name\n\n\nclass License(Model):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_(\n 'Key which is used to identify a license'))\n name = CharField(max_length=128, verbose_name=_('Name'),\n help_text=_(\n 'License name which will be presented to '\n 'the user'))\n\n class Meta:\n verbose_name = _('License')\n verbose_name_plural = _('Licenses')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass Database(Model):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_('Key which is used to identify a database'))\n name = CharField(max_length=128, verbose_name=_('Name'),\n help_text=_(\n 'Database name which will be presented to the user'))\n\n class Meta:\n verbose_name = _('Database')\n verbose_name_plural = _('Databases')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass DatabaseDependency(Model):\n app_release = ForeignKey('AppRelease', on_delete=CASCADE,\n verbose_name=_('App release'),\n related_name='databasedependencies')\n database = ForeignKey('Database', related_name='releasedependencies',\n on_delete=CASCADE, verbose_name=_('Database'))\n version_spec = CharField(max_length=128,\n verbose_name=_('Database version requirement'))\n\n class Meta:\n verbose_name = _('Database Dependency')\n verbose_name_plural = _('Database Dependencies')\n unique_together = (('app_release', 'database', 'version_spec'),)\n\n def __str__(self) -> str:\n return '%s: %s %s' % (self.app_release, self.database,\n self.version_spec)\n\n\nclass PhpExtension(Model):\n id = CharField(max_length=128, unique=True, help_text=_('e.g. libxml'),\n primary_key=True, verbose_name=_('PHP extension'))\n\n class Meta:\n verbose_name = _('PHP Extension')\n verbose_name_plural = _('PHP Extensions')\n\n def __str__(self) -> str:\n return self.id\n\n\nclass PhpExtensionDependency(Model):\n app_release = ForeignKey('AppRelease', on_delete=CASCADE,\n verbose_name=_('App Release'),\n related_name='phpextensiondependencies')\n php_extension = ForeignKey('PhpExtension', on_delete=CASCADE,\n verbose_name=_('PHP Extension'),\n related_name='releasedependencies')\n version_spec = CharField(max_length=128,\n verbose_name=_('Extension version requirement'))\n\n class Meta:\n verbose_name = _('PHP Extension Dependency')\n verbose_name_plural = _('PHP Extension Dependencies')\n unique_together = (('app_release', 'php_extension', 'version_spec'),)\n\n def __str__(self) -> str:\n return '%s: %s %s' % (self.app_release.app, self.php_extension,\n self.version_spec)\n", "path": "nextcloudappstore/core/models.py"}]} | 2,982 | 100 |
gh_patches_debug_9738 | rasdani/github-patches | git_diff | modin-project__modin-6337 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ray is incompatible with pydantic>=2.0
We should pin `pydantic<2.0` to work around the issues on the Ray side.
Example [from](https://github.com/modin-project/modin/actions/runs/5425526005/jobs/9866377841):
```bash
File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/__init__.py", line 1, in <module>
from ray.util.state.api import (
File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/api.py", line 17, in <module>
from ray.util.state.common import (
File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/common.py", line 120, in <module>
@dataclass(init=True)
File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/pydantic/dataclasses.py", line 139, in dataclass
assert init is False, 'pydantic.dataclasses.dataclass only supports init=False'
AssertionError: pydantic.dataclasses.dataclass only supports init=False
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import versioneer
3
4 with open("README.md", "r", encoding="utf-8") as fh:
5 long_description = fh.read()
6
7 dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
8 # ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100
9 ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"]
10 unidist_deps = ["unidist[mpi]>=0.2.1"]
11 remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
12 spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
13 sql_deps = ["dfsql>=0.4.2", "pyparsing<=2.4.7"]
14 all_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps
15
16 # Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.
17 # This file provides the "import pandas before Ray init" feature if specific
18 # environment variable is set (see https://github.com/modin-project/modin/issues/4564).
19 cmdclass = versioneer.get_cmdclass()
20 extra_files = ["modin-autoimport-pandas.pth"]
21
22
23 class AddPthFileBuild(cmdclass["build_py"]):
24 def _get_data_files(self):
25 return (super()._get_data_files() or []) + [
26 (".", ".", self.build_lib, extra_files)
27 ]
28
29
30 class AddPthFileSDist(cmdclass["sdist"]):
31 def make_distribution(self):
32 self.filelist.extend(extra_files)
33 return super().make_distribution()
34
35
36 cmdclass["build_py"] = AddPthFileBuild
37 cmdclass["sdist"] = AddPthFileSDist
38
39 setup(
40 name="modin",
41 version=versioneer.get_version(),
42 cmdclass=cmdclass,
43 description="Modin: Make your pandas code run faster by changing one line of code.",
44 packages=find_packages(exclude=["scripts", "scripts.*"]),
45 include_package_data=True,
46 license="Apache 2",
47 url="https://github.com/modin-project/modin",
48 long_description=long_description,
49 long_description_content_type="text/markdown",
50 install_requires=[
51 "pandas>=2,<2.1",
52 "packaging",
53 "numpy>=1.18.5",
54 "fsspec",
55 "psutil",
56 ],
57 extras_require={
58 # can be installed by pip install modin[dask]
59 "dask": dask_deps,
60 "ray": ray_deps,
61 "unidist": unidist_deps,
62 "remote": remote_deps,
63 "spreadsheet": spreadsheet_deps,
64 "sql": sql_deps,
65 "all": all_deps,
66 },
67 python_requires=">=3.8",
68 )
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,8 @@
dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100
-ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"]
+# pydantic<2: https://github.com/modin-project/modin/issues/6336
+ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow", "pydantic<2"]
unidist_deps = ["unidist[mpi]>=0.2.1"]
remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,7 +6,8 @@\n \n dask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n # ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\n-ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\n+# pydantic<2: https://github.com/modin-project/modin/issues/6336\n+ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\", \"pydantic<2\"]\n unidist_deps = [\"unidist[mpi]>=0.2.1\"]\n remote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\n spreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\n", "issue": "Ray is incompatible with pydantic>=2.0\nWe should pin `pydantic<2.0` to workaround the issues on Ray side.\r\n\r\n\r\nExample [from](https://github.com/modin-project/modin/actions/runs/5425526005/jobs/9866377841):\r\n```bash\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/__init__.py\", line 1, in <module>\r\n from ray.util.state.api import (\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/api.py\", line 17, in <module>\r\n from ray.util.state.common import (\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/common.py\", line 120, in <module>\r\n @dataclass(init=True)\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/pydantic/dataclasses.py\", line 139, in dataclass\r\n assert init is False, 'pydantic.dataclasses.dataclass only supports init=False'\r\nAssertionError: pydantic.dataclasses.dataclass only supports init=False\r\n```\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\nray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n 
\"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\n# pydantic<2: https://github.com/modin-project/modin/issues/6336\nray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\", \"pydantic<2\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n \"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]} | 1,309 | 225 |
gh_patches_debug_23237 | rasdani/github-patches | git_diff | joke2k__faker-956 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyfloat, pydecimal cannot be negative when left_digits=0
I am trying to fake random floats in the range `(-1, 1)`. Currently, `pyfloat` and `pydecimal` will never be negative as long as `left_digits=0`, even if `positive=False`.
This seems to be because [the `sign` (-1, 1) is multiplied by only the left digits](https://github.com/joke2k/faker/blob/6e32e07/faker/providers/python/__init__.py#L62) (which in this case is 0, canceling out the `sign`), rather than by the entire numeric value.
### Steps to reproduce
```python
>>> import faker
>>> fake = faker.Faker()
>>> any(fake.pyfloat(left_digits=0, positive=False) < 0 for _ in range(10000))
False
```
### Expected behavior
I'd expect approximately half of the floats to be negative when `positive` is not `True`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/python/__init__.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 from decimal import Decimal
6 import sys
7
8 import six
9
10 from .. import BaseProvider
11
12
13 class Provider(BaseProvider):
14 def pybool(self):
15 return self.random_int(0, 1) == 1
16
17 def pystr(self, min_chars=None, max_chars=20):
18 """
19 Generates a random string of upper and lowercase letters.
20 :type min_chars: int
21 :type max_chars: int
22 :return: String. Random of random length between min and max characters.
23 """
24 if min_chars is None:
25 return "".join(self.random_letters(length=max_chars))
26 else:
27 assert (
28 max_chars >= min_chars), "Maximum length must be greater than or equal to minium length"
29 return "".join(
30 self.random_letters(
31 length=self.generator.random.randint(min_chars, max_chars),
32 ),
33 )
34
35 def pyfloat(self, left_digits=None, right_digits=None, positive=False,
36 min_value=None, max_value=None):
37
38 if left_digits is not None and left_digits < 0:
39 raise ValueError(
40 'A float number cannot have less than 0 digits in its '
41 'integer part')
42 if right_digits is not None and right_digits < 0:
43 raise ValueError(
44 'A float number cannot have less than 0 digits in its '
45 'fractional part')
46 if left_digits == 0 and right_digits == 0:
47 raise ValueError(
48 'A float number cannot have less than 0 digits in total')
49 if None not in (min_value, max_value) and min_value > max_value:
50 raise ValueError('Min value cannot be greater than max value')
51
52 left_digits = left_digits if left_digits is not None else (
53 self.random_int(1, sys.float_info.dig))
54 right_digits = right_digits if right_digits is not None else (
55 self.random_int(0, sys.float_info.dig - left_digits))
56 sign = 1 if positive else self.random_element((-1, 1))
57
58 if (min_value is not None) or (max_value is not None):
59 if min_value is None:
60 min_value = max_value - self.random_int()
61 if max_value is None:
62 max_value = min_value + self.random_int()
63
64 left_number = self.random_int(min_value, max_value)
65 else:
66 left_number = sign * self.random_number(left_digits)
67
68 return float("{0}.{1}".format(
69 left_number,
70 self.random_number(right_digits),
71 ))
72
73 def pyint(self, min=0, max=9999, step=1):
74 return self.generator.random_int(min, max, step=step)
75
76 def pydecimal(self, left_digits=None, right_digits=None, positive=False,
77 min_value=None, max_value=None):
78
79 float_ = self.pyfloat(
80 left_digits, right_digits, positive, min_value, max_value)
81 return Decimal(str(float_))
82
83 def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):
84 return tuple(
85 self.pyset(
86 nb_elements,
87 variable_nb_elements,
88 *value_types))
89
90 def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):
91 return set(
92 self._pyiterable(
93 nb_elements,
94 variable_nb_elements,
95 *value_types))
96
97 def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):
98 return list(
99 self._pyiterable(
100 nb_elements,
101 variable_nb_elements,
102 *value_types))
103
104 def pyiterable(
105 self,
106 nb_elements=10,
107 variable_nb_elements=True,
108 *value_types):
109 return self.random_element([self.pylist, self.pytuple, self.pyset])(
110 nb_elements, variable_nb_elements, *value_types)
111
112 def _random_type(self, type_list):
113 value_type = self.random_element(type_list)
114
115 method_name = "py{0}".format(value_type)
116 if hasattr(self, method_name):
117 value_type = method_name
118
119 return self.generator.format(value_type)
120
121 def _pyiterable(
122 self,
123 nb_elements=10,
124 variable_nb_elements=True,
125 *value_types):
126
127 value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()
128 for t in value_types
129 # avoid recursion
130 if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]
131 if not value_types:
132 value_types = ['str', 'str', 'str', 'str', 'float',
133 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
134
135 if variable_nb_elements:
136 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
137
138 for _ in range(nb_elements):
139 yield self._random_type(value_types)
140
141 def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):
142 """
143 Returns a dictionary.
144
145 :nb_elements: number of elements for dictionary
146 :variable_nb_elements: is use variable number of elements for dictionary
147 :value_types: type of dictionary values
148 """
149 if variable_nb_elements:
150 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
151
152 return dict(zip(
153 self.generator.words(nb_elements),
154 self._pyiterable(nb_elements, False, *value_types),
155 ))
156
157 def pystruct(self, count=10, *value_types):
158
159 value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()
160 for t in value_types
161 # avoid recursion
162 if t != 'struct']
163 if not value_types:
164 value_types = ['str', 'str', 'str', 'str', 'float',
165 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
166
167 types = []
168 d = {}
169 nd = {}
170 for i in range(count):
171 d[self.generator.word()] = self._random_type(value_types)
172 types.append(self._random_type(value_types))
173 nd[self.generator.word()] = {i: self._random_type(value_types),
174 i + 1: [self._random_type(value_types),
175 self._random_type(value_types),
176 self._random_type(value_types)],
177 i + 2: {i: self._random_type(value_types),
178 i + 1: self._random_type(value_types),
179 i + 2: [self._random_type(value_types),
180 self._random_type(value_types)]}}
181 return types, d, nd
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py
--- a/faker/providers/python/__init__.py
+++ b/faker/providers/python/__init__.py
@@ -53,8 +53,7 @@
self.random_int(1, sys.float_info.dig))
right_digits = right_digits if right_digits is not None else (
self.random_int(0, sys.float_info.dig - left_digits))
- sign = 1 if positive else self.random_element((-1, 1))
-
+ sign = ''
if (min_value is not None) or (max_value is not None):
if min_value is None:
min_value = max_value - self.random_int()
@@ -63,9 +62,11 @@
left_number = self.random_int(min_value, max_value)
else:
- left_number = sign * self.random_number(left_digits)
+ sign = '+' if positive else self.random_element(('+', '-'))
+ left_number = self.random_number(left_digits)
- return float("{0}.{1}".format(
+ return float("{0}{1}.{2}".format(
+ sign,
left_number,
self.random_number(right_digits),
))
| {"golden_diff": "diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py\n--- a/faker/providers/python/__init__.py\n+++ b/faker/providers/python/__init__.py\n@@ -53,8 +53,7 @@\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n- sign = 1 if positive else self.random_element((-1, 1))\n-\n+ sign = ''\n if (min_value is not None) or (max_value is not None):\n if min_value is None:\n min_value = max_value - self.random_int()\n@@ -63,9 +62,11 @@\n \n left_number = self.random_int(min_value, max_value)\n else:\n- left_number = sign * self.random_number(left_digits)\n+ sign = '+' if positive else self.random_element(('+', '-'))\n+ left_number = self.random_number(left_digits)\n \n- return float(\"{0}.{1}\".format(\n+ return float(\"{0}{1}.{2}\".format(\n+ sign,\n left_number,\n self.random_number(right_digits),\n ))\n", "issue": "pyfloat, pydecimal cannot be negative when left_digits=0\nI am trying to fake random floats in the range `(-1, 1)`. Currently, `pyfloat` and `pydecimal` will never be negative as long as `left_digits=0`, even if `positive=False`.\r\n\r\nThis seems to be because [the `sign` (-1, 1) is multiplied by only the left digits](https://github.com/joke2k/faker/blob/6e32e07/faker/providers/python/__init__.py#L62) (which in this case is 0, canceling out the `sign`), rather than by the entire numeric value.\r\n\r\n### Steps to reproduce\r\n\r\n```python\r\n>>> import faker\r\n>>> fake = faker.Faker()\r\n>>> any(fake.pyfloat(left_digits=0, positive=False) < 0 for _ in range(10000))\r\nFalse\r\n```\r\n\r\n### Expected behavior\r\n\r\nI'd expect approximately half of the floats to be negative, when `positive` is not `True`\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nimport sys\n\nimport six\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. 
Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minium length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n\n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = 1 if positive else self.random_element((-1, 1))\n\n if (min_value is not None) or (max_value is not None):\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n\n left_number = self.random_int(min_value, max_value)\n else:\n left_number = sign * self.random_number(left_digits)\n\n return float(\"{0}.{1}\".format(\n left_number,\n self.random_number(right_digits),\n ))\n\n def pyint(self, min=0, max=9999, step=1):\n return self.generator.random_int(min, max, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(\n self.pyset(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, *value_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = \"py{0}\".format(value_type)\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for 
_ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements),\n self._pyiterable(nb_elements, False, *value_types),\n ))\n\n def pystruct(self, count=10, *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nimport sys\n\nimport six\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. 
Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minium length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n\n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = ''\n if (min_value is not None) or (max_value is not None):\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n\n left_number = self.random_int(min_value, max_value)\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n\n return float(\"{0}{1}.{2}\".format(\n sign,\n left_number,\n self.random_number(right_digits),\n ))\n\n def pyint(self, min=0, max=9999, step=1):\n return self.generator.random_int(min, max, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(\n self.pyset(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, *value_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = \"py{0}\".format(value_type)\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n if variable_nb_elements:\n nb_elements = 
self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements),\n self._pyiterable(nb_elements, False, *value_types),\n ))\n\n def pystruct(self, count=10, *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}]} | 2,379 | 269 |
gh_patches_debug_3681 | rasdani/github-patches | git_diff | ivy-llc__ivy-13823 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rand_like
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/torch/random_sampling.py`
Content:
```
1 import ivy
2 from ivy.func_wrapper import with_supported_dtypes
3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
4
5 try:
6 from torch import Generator
7 except ImportError:
8 from types import SimpleNamespace
9
10 Generator = SimpleNamespace
11
12
13 def seed() -> int:
14 """Returns a 64 bit number used to seed the RNG"""
15 return int(ivy.randint(-(2**63), 2**63 - 1))
16
17
18 @to_ivy_arrays_and_back
19 def manual_seed(seed: int):
20 ivy.seed(seed_value=seed)
21 return Generator().manual_seed(seed)
22
23
24 @with_supported_dtypes(
25 {
26 "1.11.0 and below": (
27 "float32",
28 "float64",
29 )
30 },
31 "torch",
32 )
33 @to_ivy_arrays_and_back
34 def multinomial(input, num_samples, replacement=False, *, generator=None, out=None):
35 return ivy.multinomial(
36 num_samples + 1, # doesn't matter because `probs` is provided, but should be
37 # greater than the number of samples
38 num_samples,
39 probs=input,
40 replace=replacement,
41 out=out,
42 )
43
44
45 @with_supported_dtypes(
46 {
47 "1.11.0 and below": (
48 "float32",
49 "float64",
50 )
51 },
52 "torch",
53 )
54 @to_ivy_arrays_and_back
55 def poisson(input, generator=None):
56 return ivy.poisson(input, shape=None)
57
58
59 @to_ivy_arrays_and_back
60 def rand(
61 size,
62 *,
63 generator=None,
64 out=None,
65 dtype=None,
66 layout=None,
67 device=None,
68 requires_grad=False,
69 pin_memory=False
70 ):
71 return ivy.random_uniform(
72 shape=size,
73 out=out,
74 dtype=dtype,
75 device=device,
76 )
77
78
79 @to_ivy_arrays_and_back
80 def randn(
81 size,
82 *,
83 generator=None,
84 out=None,
85 dtype=None,
86 layout=None,
87 device=None,
88 requires_grad=False,
89 pin_memory=False
90 ):
91 return ivy.random_normal(
92 shape=size,
93 out=out,
94 dtype=dtype,
95 device=device,
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py
--- a/ivy/functional/frontends/torch/random_sampling.py
+++ b/ivy/functional/frontends/torch/random_sampling.py
@@ -76,6 +76,27 @@
)
+@to_ivy_arrays_and_back
+def rand_like(
+ input,
+ *,
+ dtype=None,
+ layout=None,
+ device=None,
+ requires_grad=False,
+ memory_format=False
+):
+ shape = input.shape
+ if not dtype:
+ dtype = input.dtype
+
+ return ivy.random_uniform(
+ shape=shape,
+ dtype=dtype,
+ device=device,
+ )
+
+
@to_ivy_arrays_and_back
def randn(
size,
| {"golden_diff": "diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py\n--- a/ivy/functional/frontends/torch/random_sampling.py\n+++ b/ivy/functional/frontends/torch/random_sampling.py\n@@ -76,6 +76,27 @@\n )\n \n \n+@to_ivy_arrays_and_back\n+def rand_like(\n+ input,\n+ *,\n+ dtype=None,\n+ layout=None,\n+ device=None,\n+ requires_grad=False,\n+ memory_format=False\n+):\n+ shape = input.shape\n+ if not dtype:\n+ dtype = input.dtype\n+\n+ return ivy.random_uniform(\n+ shape=shape,\n+ dtype=dtype,\n+ device=device,\n+ )\n+\n+\n @to_ivy_arrays_and_back\n def randn(\n size,\n", "issue": "rand_like\n\n", "before_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\ntry:\n from torch import Generator\nexcept ImportError:\n from types import SimpleNamespace\n\n Generator = SimpleNamespace\n\n\ndef seed() -> int:\n \"\"\"Returns a 64 bit number used to seed the RNG\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return Generator().manual_seed(seed)\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n return ivy.poisson(input, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef rand(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_uniform(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_normal(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}], "after_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\ntry:\n from torch import Generator\nexcept ImportError:\n from types import SimpleNamespace\n\n Generator = SimpleNamespace\n\n\ndef seed() -> int:\n \"\"\"Returns a 64 bit number used to seed the RNG\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return Generator().manual_seed(seed)\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n 
)\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n return ivy.poisson(input, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef rand(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_uniform(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef rand_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=False\n):\n shape = input.shape\n if not dtype:\n dtype = input.dtype\n\n return ivy.random_uniform(\n shape=shape,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_normal(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}]} | 940 | 188 |
gh_patches_debug_2345 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-87 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Entities loaded in gcloud.datastore don't have a dataset
``` python
>>> dataset = demo.get_dataset()
>>> query = dataset.query()
>>> entity = query.fetch()[0]
>>> entity.delete()
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "gcloud/datastore/entity.py", line 206, in delete
self.dataset().connection().delete_entity(
AttributeError: 'NoneType' object has no attribute 'delete_entity'
```
This is because we're creating entities from the protobufs, with the proper `dataset_id` but not a true reference to the Dataset object (which has a pointer to the connection).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gcloud/datastore/query.py`
Content:
```
1 import copy
2
3 from gcloud.datastore import datastore_v1_pb2 as datastore_pb
4 from gcloud.datastore import helpers
5 from gcloud.datastore.entity import Entity
6
7
8 # TODO: Figure out how to properly handle namespaces.
9
10 class Query(object):
11 """A Query against the Cloud Datastore.
12
13 This class serves as an abstraction for creating
14 a query over data stored in the Cloud Datastore.
15
16 Each :class:`Query` object is immutable,
17 and a clone is returned whenever
18 any part of the query is modified::
19
20 >>> query = Query('MyKind')
21 >>> limited_query = query.limit(10)
22 >>> query.limit() == 10
23 False
24 >>> limited_query.limit() == 10
25 True
26
27 You typically won't construct a :class:`Query`
28 by initializing it like ``Query('MyKind', dataset=...)``
29 but instead use the helper
30 :func:`gcloud.datastore.dataset.Dataset.query` method
31 which generates a query that can be executed
32 without any additional work::
33
34 >>> from gcloud import datastore
35 >>> dataset = datastore.get_dataset('dataset-id', email, key_path)
36 >>> query = dataset.query('MyKind')
37
38 :type kind: string
39 :param kind: The kind to query.
40
41 :type dataset: :class:`gcloud.datastore.dataset.Dataset`
42 :param dataset: The dataset to query.
43 """
44
45 OPERATORS = {
46 '<': datastore_pb.PropertyFilter.LESS_THAN,
47 '<=': datastore_pb.PropertyFilter.LESS_THAN_OR_EQUAL,
48 '>': datastore_pb.PropertyFilter.GREATER_THAN,
49 '>=': datastore_pb.PropertyFilter.GREATER_THAN_OR_EQUAL,
50 '=': datastore_pb.PropertyFilter.EQUAL,
51 }
52 """Mapping of operator strings and their protobuf equivalents."""
53
54 def __init__(self, kind=None, dataset=None):
55 self._dataset = dataset
56 self._pb = datastore_pb.Query()
57
58 if kind:
59 self._pb.kind.add().name = kind
60
61 def _clone(self):
62 # TODO(jjg): Double check that this makes sense...
63 clone = copy.deepcopy(self)
64 clone._dataset = self._dataset # Shallow copy the dataset.
65 return clone
66
67 def to_protobuf(self):
68 """Convert the :class:`Query` instance to a :class:`gcloud.datastore.datastore_v1_pb2.Query`.
69
70 :rtype: :class:`gclouddatstore.datastore_v1_pb2.Query`
71 :returns: A Query protobuf that can be sent to the protobuf API.
72 """
73 return self._pb
74
75 def filter(self, expression, value):
76 """Filter the query based on an expression and a value.
77
78 This will return a clone of the current :class:`Query`
79 filtered by the expression and value provided.
80
81 Expressions take the form of::
82
83 .filter('<property> <operator>', <value>)
84
85 where property is a property stored on the entity in the datastore
86 and operator is one of ``OPERATORS``
87 (ie, ``=``, ``<``, ``<=``, ``>``, ``>=``)::
88
89 >>> query = Query('Person')
90 >>> filtered_query = query.filter('name =', 'James')
91 >>> filtered_query = query.filter('age >', 50)
92
93 Because each call to ``.filter()`` returns a cloned ``Query`` object
94 we are able to string these together::
95
96 >>> query = Query('Person').filter('name =', 'James').filter('age >', 50)
97
98 :type expression: string
99 :param expression: An expression of a property and an operator (ie, ``=``).
100
101 :type value: integer, string, boolean, float, None, datetime
102 :param value: The value to filter on.
103
104 :rtype: :class:`Query`
105 :returns: A Query filtered by the expression and value provided.
106 """
107 clone = self._clone()
108
109 # Take an expression like 'property >=', and parse it into useful pieces.
110 property_name, operator = None, None
111 expression = expression.strip()
112
113 for operator_string in self.OPERATORS:
114 if expression.endswith(operator_string):
115 operator = self.OPERATORS[operator_string]
116 property_name = expression[0:-len(operator_string)].strip()
117
118 if not operator or not property_name:
119 raise ValueError('Invalid expression: "%s"' % expression)
120
121 # Build a composite filter AND'd together.
122 composite_filter = clone._pb.filter.composite_filter
123 composite_filter.operator = datastore_pb.CompositeFilter.AND
124
125 # Add the specific filter
126 property_filter = composite_filter.filter.add().property_filter
127 property_filter.property.name = property_name
128 property_filter.operator = operator
129
130 # Set the value to filter on based on the type.
131 attr_name, pb_value = helpers.get_protobuf_attribute_and_value(value)
132 setattr(property_filter.value, attr_name, pb_value)
133 return clone
134
135 def kind(self, *kinds):
136 """Get or set the Kind of the Query.
137
138 .. note::
139 This is an **additive** operation.
140 That is, if the Query is set for kinds A and B,
141 and you call ``.kind('C')``,
142 it will query for kinds A, B, *and*, C.
143
144 :type kinds: string
145 :param kinds: The entity kinds for which to query.
146
147 :rtype: string or :class:`Query`
148 :returns: If no arguments, returns the kind.
149 If a kind is provided, returns a clone of the :class:`Query`
150 with those kinds set.
151 """
152 # TODO: Do we want this to be additive?
153 # If not, clear the _pb.kind attribute.
154 if kinds:
155 clone = self._clone()
156 for kind in kinds:
157 clone._pb.kind.add().name = kind
158 return clone
159 else:
160 return self._pb.kind
161
162 def limit(self, limit=None):
163 """Get or set the limit of the Query.
164
165 This is the maximum number of rows (Entities) to return for this Query.
166
167 This is a hybrid getter / setter, used as::
168
169 >>> query = Query('Person')
170 >>> query = query.limit(100) # Set the limit to 100 rows.
171 >>> query.limit() # Get the limit for this query.
172 100
173
174 :rtype: integer, None, or :class:`Query`
175 :returns: If no arguments, returns the current limit.
176 If a limit is provided, returns a clone of the :class:`Query`
177 with that limit set.
178 """
179 if limit:
180 clone = self._clone()
181 clone._pb.limit = limit
182 return clone
183 else:
184 return self._pb.limit
185
186 def dataset(self, dataset=None):
187 """Get or set the :class:`gcloud.datastore.dataset.Dataset` for this Query.
188
189 This is the dataset against which the Query will be run.
190
191 This is a hybrid getter / setter, used as::
192
193 >>> query = Query('Person')
194 >>> query = query.dataset(my_dataset) # Set the dataset.
195 >>> query.dataset() # Get the current dataset.
196 <Dataset object>
197
198 :rtype: :class:`gcloud.datastore.dataset.Dataset`, None, or :class:`Query`
199 :returns: If no arguments, returns the current dataset.
200 If a dataset is provided, returns a clone of the :class:`Query`
201 with that dataset set.
202 """
203 if dataset:
204 clone = self._clone()
205 clone._dataset = dataset
206 return clone
207 else:
208 return self._dataset
209
210 def fetch(self, limit=None):
211 """Executes the Query and returns all matching entities.
212
213 This makes an API call to the Cloud Datastore,
214 sends the Query as a protobuf,
215 parses the responses to Entity protobufs,
216 and then converts them to :class:`gcloud.datastore.entity.Entity` objects.
217
218 For example::
219
220 >>> from gcloud import datastore
221 >>> dataset = datastore.get_dataset('dataset-id', email, key_path)
222 >>> query = dataset.query('Person').filter('name =', 'Sally')
223 >>> query.fetch()
224 [<Entity object>, <Entity object>, ...]
225 >>> query.fetch(1)
226 [<Entity object>]
227 >>> query.limit()
228 None
229
230 :type limit: integer
231 :param limit: An optional limit to apply temporarily to this query.
232 That is, the Query itself won't be altered,
233 but the limit will be applied to the query
234 before it is executed.
235
236 :rtype: list of :class:`gcloud.datastore.entity.Entity`'s
237 :returns: The list of entities matching this query's criteria.
238 """
239 clone = self
240
241 if limit:
242 clone = self.limit(limit)
243
244 entity_pbs = self.dataset().connection().run_query(
245 query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())
246
247 return [Entity.from_protobuf(entity) for entity in entity_pbs]
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gcloud/datastore/query.py b/gcloud/datastore/query.py
--- a/gcloud/datastore/query.py
+++ b/gcloud/datastore/query.py
@@ -244,4 +244,5 @@
entity_pbs = self.dataset().connection().run_query(
query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())
- return [Entity.from_protobuf(entity) for entity in entity_pbs]
+ return [Entity.from_protobuf(entity, dataset=self.dataset())
+ for entity in entity_pbs]
| {"golden_diff": "diff --git a/gcloud/datastore/query.py b/gcloud/datastore/query.py\n--- a/gcloud/datastore/query.py\n+++ b/gcloud/datastore/query.py\n@@ -244,4 +244,5 @@\n entity_pbs = self.dataset().connection().run_query(\n query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())\n \n- return [Entity.from_protobuf(entity) for entity in entity_pbs]\n+ return [Entity.from_protobuf(entity, dataset=self.dataset())\n+ for entity in entity_pbs]\n", "issue": "Entities loaded in gcloud.datastore don't have a dataset\n``` python\n>>> dataset = demo.get_dataset()\n>>> query = dataset.query()\n>>> entity = query.fetch()[0]\n>>> entity.delete()\n... \nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"gcloud/datastore/entity.py\", line 206, in delete\n self.dataset().connection().delete_entity(\nAttributeError: 'NoneType' object has no attribute 'delete_entity'\n```\n\nThis is because we're creating entities from the protobufs, with the proper `dataset_id` but not a true reference to the Dataset object (which has a pointer to the connection).\n\n", "before_files": [{"content": "import copy\n\nfrom gcloud.datastore import datastore_v1_pb2 as datastore_pb\nfrom gcloud.datastore import helpers\nfrom gcloud.datastore.entity import Entity\n\n\n# TODO: Figure out how to properly handle namespaces.\n\nclass Query(object):\n \"\"\"A Query against the Cloud Datastore.\n\n This class serves as an abstraction for creating\n a query over data stored in the Cloud Datastore.\n\n Each :class:`Query` object is immutable,\n and a clone is returned whenever\n any part of the query is modified::\n\n >>> query = Query('MyKind')\n >>> limited_query = query.limit(10)\n >>> query.limit() == 10\n False\n >>> limited_query.limit() == 10\n True\n\n You typically won't construct a :class:`Query`\n by initializing it like ``Query('MyKind', dataset=...)``\n but instead use the helper\n :func:`gcloud.datastore.dataset.Dataset.query` method\n which generates a query that can be executed\n without any additional work::\n\n >>> from gcloud import datastore\n >>> dataset = datastore.get_dataset('dataset-id', email, key_path)\n >>> query = dataset.query('MyKind')\n\n :type kind: string\n :param kind: The kind to query.\n\n :type dataset: :class:`gcloud.datastore.dataset.Dataset`\n :param dataset: The dataset to query.\n \"\"\"\n\n OPERATORS = {\n '<': datastore_pb.PropertyFilter.LESS_THAN,\n '<=': datastore_pb.PropertyFilter.LESS_THAN_OR_EQUAL,\n '>': datastore_pb.PropertyFilter.GREATER_THAN,\n '>=': datastore_pb.PropertyFilter.GREATER_THAN_OR_EQUAL,\n '=': datastore_pb.PropertyFilter.EQUAL,\n }\n \"\"\"Mapping of operator strings and their protobuf equivalents.\"\"\"\n\n def __init__(self, kind=None, dataset=None):\n self._dataset = dataset\n self._pb = datastore_pb.Query()\n\n if kind:\n self._pb.kind.add().name = kind\n\n def _clone(self):\n # TODO(jjg): Double check that this makes sense...\n clone = copy.deepcopy(self)\n clone._dataset = self._dataset # Shallow copy the dataset.\n return clone\n\n def to_protobuf(self):\n \"\"\"Convert the :class:`Query` instance to a :class:`gcloud.datastore.datastore_v1_pb2.Query`.\n\n :rtype: :class:`gclouddatstore.datastore_v1_pb2.Query`\n :returns: A Query protobuf that can be sent to the protobuf API.\n \"\"\"\n return self._pb\n\n def filter(self, expression, value):\n \"\"\"Filter the query based on an expression and a value.\n\n This will return a clone of the current :class:`Query`\n filtered by the expression and value provided.\n\n Expressions 
take the form of::\n\n .filter('<property> <operator>', <value>)\n\n where property is a property stored on the entity in the datastore\n and operator is one of ``OPERATORS``\n (ie, ``=``, ``<``, ``<=``, ``>``, ``>=``)::\n\n >>> query = Query('Person')\n >>> filtered_query = query.filter('name =', 'James')\n >>> filtered_query = query.filter('age >', 50)\n\n Because each call to ``.filter()`` returns a cloned ``Query`` object\n we are able to string these together::\n\n >>> query = Query('Person').filter('name =', 'James').filter('age >', 50)\n\n :type expression: string\n :param expression: An expression of a property and an operator (ie, ``=``).\n\n :type value: integer, string, boolean, float, None, datetime\n :param value: The value to filter on.\n\n :rtype: :class:`Query`\n :returns: A Query filtered by the expression and value provided.\n \"\"\"\n clone = self._clone()\n\n # Take an expression like 'property >=', and parse it into useful pieces.\n property_name, operator = None, None\n expression = expression.strip()\n\n for operator_string in self.OPERATORS:\n if expression.endswith(operator_string):\n operator = self.OPERATORS[operator_string]\n property_name = expression[0:-len(operator_string)].strip()\n\n if not operator or not property_name:\n raise ValueError('Invalid expression: \"%s\"' % expression)\n\n # Build a composite filter AND'd together.\n composite_filter = clone._pb.filter.composite_filter\n composite_filter.operator = datastore_pb.CompositeFilter.AND\n\n # Add the specific filter\n property_filter = composite_filter.filter.add().property_filter\n property_filter.property.name = property_name\n property_filter.operator = operator\n\n # Set the value to filter on based on the type.\n attr_name, pb_value = helpers.get_protobuf_attribute_and_value(value)\n setattr(property_filter.value, attr_name, pb_value)\n return clone\n\n def kind(self, *kinds):\n \"\"\"Get or set the Kind of the Query.\n\n .. 
note::\n This is an **additive** operation.\n That is, if the Query is set for kinds A and B,\n and you call ``.kind('C')``,\n it will query for kinds A, B, *and*, C.\n\n :type kinds: string\n :param kinds: The entity kinds for which to query.\n\n :rtype: string or :class:`Query`\n :returns: If no arguments, returns the kind.\n If a kind is provided, returns a clone of the :class:`Query`\n with those kinds set.\n \"\"\"\n # TODO: Do we want this to be additive?\n # If not, clear the _pb.kind attribute.\n if kinds:\n clone = self._clone()\n for kind in kinds:\n clone._pb.kind.add().name = kind\n return clone\n else:\n return self._pb.kind\n\n def limit(self, limit=None):\n \"\"\"Get or set the limit of the Query.\n\n This is the maximum number of rows (Entities) to return for this Query.\n\n This is a hybrid getter / setter, used as::\n\n >>> query = Query('Person')\n >>> query = query.limit(100) # Set the limit to 100 rows.\n >>> query.limit() # Get the limit for this query.\n 100\n\n :rtype: integer, None, or :class:`Query`\n :returns: If no arguments, returns the current limit.\n If a limit is provided, returns a clone of the :class:`Query`\n with that limit set.\n \"\"\"\n if limit:\n clone = self._clone()\n clone._pb.limit = limit\n return clone\n else:\n return self._pb.limit\n\n def dataset(self, dataset=None):\n \"\"\"Get or set the :class:`gcloud.datastore.dataset.Dataset` for this Query.\n\n This is the dataset against which the Query will be run.\n\n This is a hybrid getter / setter, used as::\n\n >>> query = Query('Person')\n >>> query = query.dataset(my_dataset) # Set the dataset.\n >>> query.dataset() # Get the current dataset.\n <Dataset object>\n\n :rtype: :class:`gcloud.datastore.dataset.Dataset`, None, or :class:`Query`\n :returns: If no arguments, returns the current dataset.\n If a dataset is provided, returns a clone of the :class:`Query`\n with that dataset set.\n \"\"\"\n if dataset:\n clone = self._clone()\n clone._dataset = dataset\n return clone\n else:\n return self._dataset\n\n def fetch(self, limit=None):\n \"\"\"Executes the Query and returns all matching entities.\n\n This makes an API call to the Cloud Datastore,\n sends the Query as a protobuf,\n parses the responses to Entity protobufs,\n and then converts them to :class:`gcloud.datastore.entity.Entity` objects.\n\n For example::\n\n >>> from gcloud import datastore\n >>> dataset = datastore.get_dataset('dataset-id', email, key_path)\n >>> query = dataset.query('Person').filter('name =', 'Sally')\n >>> query.fetch()\n [<Entity object>, <Entity object>, ...]\n >>> query.fetch(1)\n [<Entity object>]\n >>> query.limit()\n None\n\n :type limit: integer\n :param limit: An optional limit to apply temporarily to this query.\n That is, the Query itself won't be altered,\n but the limit will be applied to the query\n before it is executed.\n\n :rtype: list of :class:`gcloud.datastore.entity.Entity`'s\n :returns: The list of entities matching this query's criteria.\n \"\"\"\n clone = self\n\n if limit:\n clone = self.limit(limit)\n\n entity_pbs = self.dataset().connection().run_query(\n query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())\n\n return [Entity.from_protobuf(entity) for entity in entity_pbs]\n", "path": "gcloud/datastore/query.py"}], "after_files": [{"content": "import copy\n\nfrom gcloud.datastore import datastore_v1_pb2 as datastore_pb\nfrom gcloud.datastore import helpers\nfrom gcloud.datastore.entity import Entity\n\n\n# TODO: Figure out how to properly handle namespaces.\n\nclass 
Query(object):\n \"\"\"A Query against the Cloud Datastore.\n\n This class serves as an abstraction for creating\n a query over data stored in the Cloud Datastore.\n\n Each :class:`Query` object is immutable,\n and a clone is returned whenever\n any part of the query is modified::\n\n >>> query = Query('MyKind')\n >>> limited_query = query.limit(10)\n >>> query.limit() == 10\n False\n >>> limited_query.limit() == 10\n True\n\n You typically won't construct a :class:`Query`\n by initializing it like ``Query('MyKind', dataset=...)``\n but instead use the helper\n :func:`gcloud.datastore.dataset.Dataset.query` method\n which generates a query that can be executed\n without any additional work::\n\n >>> from gcloud import datastore\n >>> dataset = datastore.get_dataset('dataset-id', email, key_path)\n >>> query = dataset.query('MyKind')\n\n :type kind: string\n :param kind: The kind to query.\n\n :type dataset: :class:`gcloud.datastore.dataset.Dataset`\n :param dataset: The dataset to query.\n \"\"\"\n\n OPERATORS = {\n '<': datastore_pb.PropertyFilter.LESS_THAN,\n '<=': datastore_pb.PropertyFilter.LESS_THAN_OR_EQUAL,\n '>': datastore_pb.PropertyFilter.GREATER_THAN,\n '>=': datastore_pb.PropertyFilter.GREATER_THAN_OR_EQUAL,\n '=': datastore_pb.PropertyFilter.EQUAL,\n }\n \"\"\"Mapping of operator strings and their protobuf equivalents.\"\"\"\n\n def __init__(self, kind=None, dataset=None):\n self._dataset = dataset\n self._pb = datastore_pb.Query()\n\n if kind:\n self._pb.kind.add().name = kind\n\n def _clone(self):\n # TODO(jjg): Double check that this makes sense...\n clone = copy.deepcopy(self)\n clone._dataset = self._dataset # Shallow copy the dataset.\n return clone\n\n def to_protobuf(self):\n \"\"\"Convert the :class:`Query` instance to a :class:`gcloud.datastore.datastore_v1_pb2.Query`.\n\n :rtype: :class:`gclouddatstore.datastore_v1_pb2.Query`\n :returns: A Query protobuf that can be sent to the protobuf API.\n \"\"\"\n return self._pb\n\n def filter(self, expression, value):\n \"\"\"Filter the query based on an expression and a value.\n\n This will return a clone of the current :class:`Query`\n filtered by the expression and value provided.\n\n Expressions take the form of::\n\n .filter('<property> <operator>', <value>)\n\n where property is a property stored on the entity in the datastore\n and operator is one of ``OPERATORS``\n (ie, ``=``, ``<``, ``<=``, ``>``, ``>=``)::\n\n >>> query = Query('Person')\n >>> filtered_query = query.filter('name =', 'James')\n >>> filtered_query = query.filter('age >', 50)\n\n Because each call to ``.filter()`` returns a cloned ``Query`` object\n we are able to string these together::\n\n >>> query = Query('Person').filter('name =', 'James').filter('age >', 50)\n\n :type expression: string\n :param expression: An expression of a property and an operator (ie, ``=``).\n\n :type value: integer, string, boolean, float, None, datetime\n :param value: The value to filter on.\n\n :rtype: :class:`Query`\n :returns: A Query filtered by the expression and value provided.\n \"\"\"\n clone = self._clone()\n\n # Take an expression like 'property >=', and parse it into useful pieces.\n property_name, operator = None, None\n expression = expression.strip()\n\n for operator_string in self.OPERATORS:\n if expression.endswith(operator_string):\n operator = self.OPERATORS[operator_string]\n property_name = expression[0:-len(operator_string)].strip()\n\n if not operator or not property_name:\n raise ValueError('Invalid expression: \"%s\"' % expression)\n\n # Build a 
composite filter AND'd together.\n composite_filter = clone._pb.filter.composite_filter\n composite_filter.operator = datastore_pb.CompositeFilter.AND\n\n # Add the specific filter\n property_filter = composite_filter.filter.add().property_filter\n property_filter.property.name = property_name\n property_filter.operator = operator\n\n # Set the value to filter on based on the type.\n attr_name, pb_value = helpers.get_protobuf_attribute_and_value(value)\n setattr(property_filter.value, attr_name, pb_value)\n return clone\n\n def kind(self, *kinds):\n \"\"\"Get or set the Kind of the Query.\n\n .. note::\n This is an **additive** operation.\n That is, if the Query is set for kinds A and B,\n and you call ``.kind('C')``,\n it will query for kinds A, B, *and*, C.\n\n :type kinds: string\n :param kinds: The entity kinds for which to query.\n\n :rtype: string or :class:`Query`\n :returns: If no arguments, returns the kind.\n If a kind is provided, returns a clone of the :class:`Query`\n with those kinds set.\n \"\"\"\n # TODO: Do we want this to be additive?\n # If not, clear the _pb.kind attribute.\n if kinds:\n clone = self._clone()\n for kind in kinds:\n clone._pb.kind.add().name = kind\n return clone\n else:\n return self._pb.kind\n\n def limit(self, limit=None):\n \"\"\"Get or set the limit of the Query.\n\n This is the maximum number of rows (Entities) to return for this Query.\n\n This is a hybrid getter / setter, used as::\n\n >>> query = Query('Person')\n >>> query = query.limit(100) # Set the limit to 100 rows.\n >>> query.limit() # Get the limit for this query.\n 100\n\n :rtype: integer, None, or :class:`Query`\n :returns: If no arguments, returns the current limit.\n If a limit is provided, returns a clone of the :class:`Query`\n with that limit set.\n \"\"\"\n if limit:\n clone = self._clone()\n clone._pb.limit = limit\n return clone\n else:\n return self._pb.limit\n\n def dataset(self, dataset=None):\n \"\"\"Get or set the :class:`gcloud.datastore.dataset.Dataset` for this Query.\n\n This is the dataset against which the Query will be run.\n\n This is a hybrid getter / setter, used as::\n\n >>> query = Query('Person')\n >>> query = query.dataset(my_dataset) # Set the dataset.\n >>> query.dataset() # Get the current dataset.\n <Dataset object>\n\n :rtype: :class:`gcloud.datastore.dataset.Dataset`, None, or :class:`Query`\n :returns: If no arguments, returns the current dataset.\n If a dataset is provided, returns a clone of the :class:`Query`\n with that dataset set.\n \"\"\"\n if dataset:\n clone = self._clone()\n clone._dataset = dataset\n return clone\n else:\n return self._dataset\n\n def fetch(self, limit=None):\n \"\"\"Executes the Query and returns all matching entities.\n\n This makes an API call to the Cloud Datastore,\n sends the Query as a protobuf,\n parses the responses to Entity protobufs,\n and then converts them to :class:`gcloud.datastore.entity.Entity` objects.\n\n For example::\n\n >>> from gcloud import datastore\n >>> dataset = datastore.get_dataset('dataset-id', email, key_path)\n >>> query = dataset.query('Person').filter('name =', 'Sally')\n >>> query.fetch()\n [<Entity object>, <Entity object>, ...]\n >>> query.fetch(1)\n [<Entity object>]\n >>> query.limit()\n None\n\n :type limit: integer\n :param limit: An optional limit to apply temporarily to this query.\n That is, the Query itself won't be altered,\n but the limit will be applied to the query\n before it is executed.\n\n :rtype: list of :class:`gcloud.datastore.entity.Entity`'s\n :returns: The list of 
entities matching this query's criteria.\n \"\"\"\n clone = self\n\n if limit:\n clone = self.limit(limit)\n\n entity_pbs = self.dataset().connection().run_query(\n query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())\n\n return [Entity.from_protobuf(entity, dataset=self.dataset())\n for entity in entity_pbs]\n", "path": "gcloud/datastore/query.py"}]} | 3,021 | 120 |
gh_patches_debug_66902 | rasdani/github-patches | git_diff | ivy-llc__ivy-17524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
argsort
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/search.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_supported_dtypes(
10 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
11 "paddle",
12 )
13 @to_ivy_arrays_and_back
14 def argmax(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
15 return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)
16
17
18 @with_supported_dtypes(
19 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
20 "paddle",
21 )
22 @to_ivy_arrays_and_back
23 def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
24 return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py
--- a/ivy/functional/frontends/paddle/tensor/search.py
+++ b/ivy/functional/frontends/paddle/tensor/search.py
@@ -22,3 +22,12 @@
@to_ivy_arrays_and_back
def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)
+
+
+@with_supported_dtypes(
+ {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def argsort(x, /, *, axis=-1, descending=False, name=None):
+ return ivy.argsort(x, axis=axis, descending=descending)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py\n--- a/ivy/functional/frontends/paddle/tensor/search.py\n+++ b/ivy/functional/frontends/paddle/tensor/search.py\n@@ -22,3 +22,12 @@\n @to_ivy_arrays_and_back\n def argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n+\n+\n+@with_supported_dtypes(\n+ {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def argsort(x, /, *, axis=-1, descending=False, name=None):\n+ return ivy.argsort(x, axis=axis, descending=descending)\n", "issue": "argsort\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmax(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/search.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmax(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argsort(x, /, *, axis=-1, descending=False, name=None):\n return ivy.argsort(x, axis=axis, descending=descending)\n", "path": "ivy/functional/frontends/paddle/tensor/search.py"}]} | 561 | 228 |